diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json deleted file mode 100644 index 677858f3a..000000000 --- a/Godeps/Godeps.json +++ /dev/null @@ -1,277 +0,0 @@ -{ - "ImportPath": "github.com/influxdb/telegraf", - "GoVersion": "go1.5.1", - "Deps": [ - { - "ImportPath": "bitbucket.org/ww/goautoneg", - "Comment": "null-5", - "Rev": "75cd24fc2f2c2a2088577d12123ddee5f54e0675" - }, - { - "ImportPath": "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git", - "Comment": "v0.9.1-14-g546c47a", - "Rev": "546c47a6d0e9492e77f6f37473d59c36a708e08b" - }, - { - "ImportPath": "github.com/Shopify/sarama", - "Comment": "v1.4.3-45-g5b18996", - "Rev": "5b18996ef1cd555a60562ae4c5d7843ae137e12d" - }, - { - "ImportPath": "github.com/Sirupsen/logrus", - "Comment": "v0.8.6-7-g9c060de", - "Rev": "9c060de643590dae45da9d7c26276463bfc46fa0" - }, - { - "ImportPath": "github.com/amir/raidman", - "Rev": "6a8e089bbe32e6b907feae5ba688841974b3c339" - }, - { - "ImportPath": "github.com/armon/go-metrics", - "Rev": "b2d95e5291cdbc26997d1301a5e467ecbb240e25" - }, - { - "ImportPath": "github.com/beorn7/perks/quantile", - "Rev": "b965b613227fddccbfffe13eae360ed3fa822f8d" - }, - { - "ImportPath": "github.com/boltdb/bolt", - "Comment": "v1.0-117-g0f053fa", - "Rev": "0f053fabc06119583d61937a0a06ef0ba0f1b301" - }, - { - "ImportPath": "github.com/cenkalti/backoff", - "Rev": "4dc77674aceaabba2c7e3da25d4c823edfb73f99" - }, - { - "ImportPath": "github.com/dancannon/gorethink/encoding", - "Comment": "v1.x.x-1-g786f12a", - "Rev": "786f12ae730ea93485c4eb2c44b3ede6e1e8745f" - }, - { - "ImportPath": "github.com/dancannon/gorethink/ql2", - "Comment": "v1.x.x-1-g786f12a", - "Rev": "786f12ae730ea93485c4eb2c44b3ede6e1e8745f" - }, - { - "ImportPath": "github.com/dancannon/gorethink/types", - "Comment": "v1.x.x-1-g786f12a", - "Rev": "786f12ae730ea93485c4eb2c44b3ede6e1e8745f" - }, - { - "ImportPath": "github.com/eapache/go-resiliency/breaker", - "Comment": "v1.0.0-1-ged0319b", - "Rev": "ed0319b32e66e3295db52695ba3ee493e823fbfe" - }, - { - "ImportPath": "github.com/eapache/queue", - "Comment": "v1.0.2", - "Rev": "ded5959c0d4e360646dc9e9908cff48666781367" - }, - { - "ImportPath": "github.com/fsouza/go-dockerclient", - "Rev": "ef410296f87750305e1e1acf9ad2ba3833dcb004" - }, - { - "ImportPath": "github.com/go-sql-driver/mysql", - "Comment": "v1.2-118-g3dd7008", - "Rev": "3dd7008ac1529aca1bcd8a9db75228a71ba23cac" - }, - { - "ImportPath": "github.com/gogo/protobuf/proto", - "Rev": "cabd153b69f71bab8b89fd667a2d9bb28c92ceb4" - }, - { - "ImportPath": "github.com/golang/protobuf/proto", - "Rev": "73aaaa9eb61d74fbf7e256ca586a3a565b308eea" - }, - { - "ImportPath": "github.com/golang/snappy", - "Rev": "723cc1e459b8eea2dea4583200fd60757d40097a" - }, - { - "ImportPath": "github.com/gonuts/go-shellquote", - "Rev": "e842a11b24c6abfb3dd27af69a17f482e4b483c2" - }, - { - "ImportPath": "github.com/hashicorp/go-msgpack/codec", - "Rev": "fa3f63826f7c23912c15263591e65d54d080b458" - }, - { - "ImportPath": "github.com/hashicorp/raft", - "Rev": "9b586e29edf1ed085b11da7772479ee45c433996" - }, - { - "ImportPath": "github.com/hashicorp/raft-boltdb", - "Rev": "d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee" - }, - { - "ImportPath": "github.com/influxdb/influxdb", - "Comment": "v0.9.4-rc1-922-gb0e9f7e", - "Rev": "b0e9f7e844225b05abf9f4455229490f99348ac4" - }, - { - "ImportPath": "github.com/lib/pq", - "Comment": "go1.0-cutoff-59-gb269bd0", - "Rev": "b269bd035a727d6c1081f76e7a239a1b00674c40" - }, - { - "ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil", - 
"Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a" - }, - { - "ImportPath": "github.com/mreiferson/go-snappystream", - "Comment": "v0.2.3", - "Rev": "028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504" - }, - { - "ImportPath": "github.com/naoina/go-stringutil", - "Rev": "360db0db4b01d34e12a2ec042c09e7d37fece761" - }, - { - "ImportPath": "github.com/naoina/toml", - "Rev": "5811abcabb29d6af0fdf060f96d328962bd3cd5e" - }, - { - "ImportPath": "github.com/nsqio/go-nsq", - "Comment": "v1.0.5-6-g2118015", - "Rev": "2118015c120962edc5d03325c680daf3163a8b5f" - }, - { - "ImportPath": "github.com/pborman/uuid", - "Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4" - }, - { - "ImportPath": "github.com/prometheus/client_golang/prometheus", - "Comment": "0.7.0-52-ge51041b", - "Rev": "e51041b3fa41cece0dca035740ba6411905be473" - }, - { - "ImportPath": "github.com/prometheus/client_model/go", - "Comment": "model-0.0.2-12-gfa8ad6f", - "Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6" - }, - { - "ImportPath": "github.com/prometheus/common/expfmt", - "Rev": "369ec0491ce7be15431bd4f23b7fa17308f94190" - }, - { - "ImportPath": "github.com/prometheus/common/model", - "Rev": "369ec0491ce7be15431bd4f23b7fa17308f94190" - }, - { - "ImportPath": "github.com/prometheus/procfs", - "Rev": "454a56f35412459b5e684fd5ec0f9211b94f002a" - }, - { - "ImportPath": "github.com/samuel/go-zookeeper/zk", - "Rev": "5bb5cfc093ad18a28148c578f8632cfdb4d802e4" - }, - { - "ImportPath": "github.com/shirou/gopsutil/cpu", - "Comment": "1.0.0-208-g759e96e", - "Rev": "759e96ebaffb01c3cba0e8b129ef29f56507b323" - }, - { - "ImportPath": "github.com/shirou/gopsutil/disk", - "Comment": "1.0.0-208-g759e96e", - "Rev": "759e96ebaffb01c3cba0e8b129ef29f56507b323" - }, - { - "ImportPath": "github.com/shirou/gopsutil/docker", - "Comment": "1.0.0-208-g759e96e", - "Rev": "759e96ebaffb01c3cba0e8b129ef29f56507b323" - }, - { - "ImportPath": "github.com/shirou/gopsutil/host", - "Comment": "1.0.0-208-g759e96e", - "Rev": "759e96ebaffb01c3cba0e8b129ef29f56507b323" - }, - { - "ImportPath": "github.com/shirou/gopsutil/load", - "Comment": "1.0.0-208-g759e96e", - "Rev": "759e96ebaffb01c3cba0e8b129ef29f56507b323" - }, - { - "ImportPath": "github.com/shirou/gopsutil/mem", - "Comment": "1.0.0-208-g759e96e", - "Rev": "759e96ebaffb01c3cba0e8b129ef29f56507b323" - }, - { - "ImportPath": "github.com/shirou/gopsutil/net", - "Comment": "1.0.0-208-g759e96e", - "Rev": "759e96ebaffb01c3cba0e8b129ef29f56507b323" - }, - { - "ImportPath": "github.com/shirou/gopsutil/process", - "Comment": "1.0.0-208-g759e96e", - "Rev": "759e96ebaffb01c3cba0e8b129ef29f56507b323" - }, - { - "ImportPath": "github.com/streadway/amqp", - "Rev": "f4879ba28fffbb576743b03622a9ff20461826b2" - }, - { - "ImportPath": "github.com/stretchr/objx", - "Rev": "cbeaeb16a013161a98496fad62933b1d21786672" - }, - { - "ImportPath": "github.com/stretchr/testify/assert", - "Comment": "v1.0-21-gf552045", - "Rev": "f5520455607c0233cb6d7b056f71b22c1d265ef1" - }, - { - "ImportPath": "github.com/stretchr/testify/mock", - "Comment": "v1.0-21-gf552045", - "Rev": "f5520455607c0233cb6d7b056f71b22c1d265ef1" - }, - { - "ImportPath": "github.com/stretchr/testify/require", - "Comment": "v1.0-21-gf552045", - "Rev": "f5520455607c0233cb6d7b056f71b22c1d265ef1" - }, - { - "ImportPath": "github.com/stretchr/testify/suite", - "Comment": "v1.0-21-gf552045", - "Rev": "f5520455607c0233cb6d7b056f71b22c1d265ef1" - }, - { - "ImportPath": "github.com/wvanbergen/kafka/consumergroup", - "Rev": "b0e5c20a0d7c3ccfd37a5965ae30a3a0fd15945d" - }, - { - "ImportPath": 
"github.com/wvanbergen/kazoo-go", - "Rev": "02a3868e9b87153285439cd27a39c0a2984a13af" - }, - { - "ImportPath": "golang.org/x/crypto/bcrypt", - "Rev": "173ce04bfaf66c7bb0fa9d5c0bfd93e773909dbd" - }, - { - "ImportPath": "golang.org/x/crypto/blowfish", - "Rev": "173ce04bfaf66c7bb0fa9d5c0bfd93e773909dbd" - }, - { - "ImportPath": "golang.org/x/net/websocket", - "Rev": "db8e4de5b2d6653f66aea53094624468caad15d2" - }, - { - "ImportPath": "gopkg.in/dancannon/gorethink.v1", - "Comment": "v1.x.x", - "Rev": "8aca6ba2cc6e873299617d730fac0d7f6593113a" - }, - { - "ImportPath": "gopkg.in/fatih/pool.v2", - "Rev": "cba550ebf9bce999a02e963296d4bc7a486cb715" - }, - { - "ImportPath": "gopkg.in/mgo.v2", - "Comment": "r2015.06.03-3-g3569c88", - "Rev": "3569c88678d88179dcbd68d02ab081cbca3cd4d0" - }, - { - "ImportPath": "gopkg.in/yaml.v2", - "Rev": "7ad95dd0798a40da1ccdff6dff35fd177b5edf40" - } - ] -} diff --git a/Godeps/Readme b/Godeps/Readme deleted file mode 100644 index 4cdaa53d5..000000000 --- a/Godeps/Readme +++ /dev/null @@ -1,5 +0,0 @@ -This directory tree is generated automatically by godep. - -Please do not edit. - -See https://github.com/tools/godep for more information. diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore deleted file mode 100644 index f037d684e..000000000 --- a/Godeps/_workspace/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/pkg -/bin diff --git a/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/Makefile b/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/Makefile deleted file mode 100644 index e33ee1730..000000000 --- a/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -include $(GOROOT)/src/Make.inc - -TARG=bitbucket.org/ww/goautoneg -GOFILES=autoneg.go - -include $(GOROOT)/src/Make.pkg - -format: - gofmt -w *.go - -docs: - gomake clean - godoc ${TARG} > README.txt diff --git a/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/README.txt b/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/README.txt deleted file mode 100644 index 7723656d5..000000000 --- a/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/README.txt +++ /dev/null @@ -1,67 +0,0 @@ -PACKAGE - -package goautoneg -import "bitbucket.org/ww/goautoneg" - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -FUNCTIONS - -func Negotiate(header string, alternatives []string) (content_type string) -Negotiate the most appropriate content_type given the accept header -and a list of alternatives. - -func ParseAccept(header string) (accept []Accept) -Parse an Accept Header string returning a sorted list -of clauses - - -TYPES - -type Accept struct { - Type, SubType string - Q float32 - Params map[string]string -} -Structure to represent a clause in an HTTP Accept Header - - -SUBDIRECTORIES - - .hg diff --git a/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/autoneg.go b/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/autoneg.go deleted file mode 100644 index 648b38cb6..000000000 --- a/Godeps/_workspace/src/bitbucket.org/ww/goautoneg/autoneg.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -*/ -package goautoneg - -import ( - "sort" - "strconv" - "strings" -) - -// Structure to represent a clause in an HTTP Accept Header -type Accept struct { - Type, SubType string - Q float64 - Params map[string]string -} - -// For internal use, so that we can use the sort interface -type accept_slice []Accept - -func (accept accept_slice) Len() int { - slice := []Accept(accept) - return len(slice) -} - -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) - ai, aj := slice[i], slice[j] - if ai.Q > aj.Q { - return true - } - if ai.Type != "*" && aj.Type == "*" { - return true - } - if ai.SubType != "*" && aj.SubType == "*" { - return true - } - return false -} - -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) - slice[i], slice[j] = slice[j], slice[i] -} - -// Parse an Accept Header string returning a sorted list -// of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") - - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 - - mrp := strings.Split(part, ";") - - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") - - switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], " ") - default: - continue - } - - if len(mrp) == 1 { - accept = append(accept, a) - continue - } - - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { - continue - } - token := strings.Trim(sp[0], " ") - if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) - } else { - a.Params[token] = strings.Trim(sp[1], " ") - } - } - - accept = append(accept, a) - } - - slice := accept_slice(accept) - sort.Sort(slice) - - return -} - -// Negotiate the most appropriate content_type given the accept header -// and a list of alternatives. 
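For reference (not part of the deleted file), here is a minimal sketch of calling this API as documented in the package README, with a hypothetical Accept header; the Negotiate implementation itself continues below:

```go
package main

import (
	"fmt"

	"bitbucket.org/ww/goautoneg"
)

func main() {
	// Hypothetical browser Accept header; q-values rank the alternatives.
	header := "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"

	// Negotiate picks the best server-supported alternative ("text/html" here,
	// since it carries the highest q-value among the offered types).
	ct := goautoneg.Negotiate(header, []string{"application/json", "text/html"})
	fmt.Println(ct)

	// ParseAccept exposes the q-sorted clauses directly.
	for _, clause := range goautoneg.ParseAccept(header) {
		fmt.Printf("%s/%s q=%v\n", clause.Type, clause.SubType, clause.Q)
	}
}
```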
-func Negotiate(header string, alternatives []string) (content_type string) { - asp := make([][]string, 0, len(alternatives)) - for _, ctype := range alternatives { - asp = append(asp, strings.SplitN(ctype, "/", 2)) - } - for _, clause := range ParseAccept(header) { - for i, ctsp := range asp { - if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { - content_type = alternatives[i] - return - } - if clause.Type == ctsp[0] && clause.SubType == "*" { - content_type = alternatives[i] - return - } - if clause.Type == "*" && clause.SubType == "*" { - content_type = alternatives[i] - return - } - } - } - return -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/.gitignore b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/.gitignore deleted file mode 100644 index 47bb0de48..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/.gitignore +++ /dev/null @@ -1,36 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -*.msg -*.lok - -samples/trivial -samples/trivial2 -samples/sample -samples/reconnect -samples/ssl -samples/custom_store -samples/simple -samples/stdinpub -samples/stdoutsub -samples/routing \ No newline at end of file diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/CONTRIBUTING.md b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/CONTRIBUTING.md deleted file mode 100644 index ce3771955..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/CONTRIBUTING.md +++ /dev/null @@ -1,69 +0,0 @@ -Contributing to Paho -==================== - -Thanks for your interest in this project. - -Project description: --------------------- - -The Paho project has been created to provide scalable open-source implementations of open and standard messaging protocols aimed at new, existing, and emerging applications for Machine-to-Machine (M2M) and Internet of Things (IoT). -Paho reflects the inherent physical and cost constraints of device connectivity. Its objectives include effective levels of decoupling between devices and applications, designed to keep markets open and encourage the rapid growth of scalable Web and Enterprise middleware and applications. Paho is being kicked off with MQTT publish/subscribe client implementations for use on embedded platforms, along with corresponding server support as determined by the community. - -- https://projects.eclipse.org/projects/technology.paho - -Developer resources: --------------------- - -Information regarding source code management, builds, coding standards, and more. - -- https://projects.eclipse.org/projects/technology.paho/developer - -Contributor License Agreement: ------------------------------- - -Before your contribution can be accepted by the project, you need to create and electronically sign the Eclipse Foundation Contributor License Agreement (CLA). 
- -- http://www.eclipse.org/legal/CLA.php - -Contributing Code: ------------------ - -The Go client uses git with Gerrit for code review, use the following URLs for Gerrit access; - -ssh://<username>@git.eclipse.org:29418/paho/org.eclipse.paho.mqtt.golang - -Configure a remote called review to push your changes to; - -git config remote.review.url ssh://<username>@git.eclipse.org:29418/paho/org.eclipse.paho.mqtt.golang -git config remote.review.push HEAD:refs/for/<branch> - -When you have made and committed a change you can push it to Gerrit for review with; - -git push review - -See https://wiki.eclipse.org/Gerrit for more details on how Gerrit is used in Eclipse, https://wiki.eclipse.org/Gerrit#Gerrit_Code_Review_Cheatsheet has some particularly useful information. - -Git commit messages should follow the style described here; - -http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html - -Contact: -------- - -Contact the project developers via the project's "dev" list. - -- https://dev.eclipse.org/mailman/listinfo/paho-dev - -Search for bugs: ---------------- - -This project uses Bugzilla to track ongoing development and issues. - -- https://bugs.eclipse.org/bugs/buglist.cgi?product=Paho&component=MQTT-Go - -Create a new bug: ----------------- - -Be sure to search for existing bugs before you create another one. Remember that contributions are always welcome! - -- https://bugs.eclipse.org/bugs/enter_bug.cgi?product=Paho diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/DISTRIBUTION b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/DISTRIBUTION deleted file mode 100644 index 34e49731d..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/DISTRIBUTION +++ /dev/null @@ -1,15 +0,0 @@ - - -Eclipse Distribution License - v 1.0 - -Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors. - -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/LICENSE b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/LICENSE deleted file mode 100644 index aa7cc810f..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/LICENSE +++ /dev/null @@ -1,87 +0,0 @@ -Eclipse Public License - v 1.0 - -THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. - -1. DEFINITIONS - -"Contribution" means: - -a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and - -b) in the case of each subsequent Contributor: - -i) changes to the Program, and - -ii) additions to the Program; - -where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program. - -"Contributor" means any person or entity that distributes the Program. - -"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. - -"Program" means the Contributions distributed in accordance with this Agreement. - -"Recipient" means anyone who receives the Program under this Agreement, including all Contributors. - -2. GRANT OF RIGHTS - -a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form. - -b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder. - -c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. 
For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program. - -d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement. - -3. REQUIREMENTS - -A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that: - -a) it complies with the terms and conditions of this Agreement; and - -b) its license agreement: - -i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; - -ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; - -iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and - -iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange. - -When the Program is made available in source code form: - -a) it must be made available under this Agreement; and - -b) a copy of this Agreement must be included with each copy of the Program. - -Contributors may not remove or alter any copyright notices contained within the Program. - -Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution. - -4. COMMERCIAL DISTRIBUTION - -Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense. - -For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. 
If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages. - -5. NO WARRANTY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. - -6. DISCLAIMER OF LIABILITY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -7. GENERAL - -If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. - -If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed. - -All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive. - -Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. 
The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved. - -This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation. \ No newline at end of file diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/README.md b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/README.md deleted file mode 100644 index cc26f0759..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/README.md +++ /dev/null @@ -1,62 +0,0 @@ -Eclipse Paho MQTT Go client -=========================== - - -This repository contains the source code for the [Eclipse Paho](http://eclipse.org/paho) MQTT Go client library. - -This code builds a library which enable applications to connect to an [MQTT](http://mqtt.org) broker to publish messages, and to subscribe to topics and receive published messages. - -This library supports a fully asynchronous mode of operation. - - -Installation and Build ----------------------- - -This client is designed to work with the standard Go tools, so installation is as easy as: - -``` -go get git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git -``` - -The client depends on Google's [websockets](http://godoc.org/code.google.com/p/go.net/websocket) package, -also easily installed with the command: - -``` -go get code.google.com/p/go.net/websocket -``` - - -Usage and API -------------- - -Detailed API documentation is available by using to godoc tool, or can be browsed online -using the [godoc.org](http://godoc.org/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git) service. - -Make use of the library by importing it in your Go client source code. For example, -``` -import MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" -``` - -Samples are available in the `/samples` directory for reference. - - -Runtime tracing ---------------- - -Tracing is enabled by assigning logs (from the Go log package) to the logging endpoints, ERROR, CRITICAL, WARN and DEBUG - - -Reporting bugs --------------- - -Please report bugs under the "MQTT-Go" Component in [Eclipse Bugzilla](http://bugs.eclipse.org/bugs/) for the Paho Technology project. This is a very new library as of Q1 2014, so there are sure to be bugs. - - -More information ----------------- - -Discussion of the Paho clients takes place on the [Eclipse paho-dev mailing list](https://dev.eclipse.org/mailman/listinfo/paho-dev). 
- -General questions about the MQTT protocol are discussed in the [MQTT Google Group](https://groups.google.com/forum/?hl=en-US&fromgroups#!forum/mqtt). - -There is much more information available via the [MQTT community site](http://mqtt.org). diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/about.html b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/about.html deleted file mode 100644 index b183f417a..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/about.html +++ /dev/null @@ -1,41 +0,0 @@ - - - -About - - -

-About This Content
-
-December 9, 2013
-
-License
-
-The Eclipse Foundation makes available all content in this plug-in ("Content"). Unless otherwise
-indicated below, the Content is provided to you under the terms and conditions of the
-Eclipse Public License Version 1.0 ("EPL") and Eclipse Distribution License Version 1.0 ("EDL").
-A copy of the EPL is available at
-http://www.eclipse.org/legal/epl-v10.html
-and a copy of the EDL is available at
-http://www.eclipse.org/org/documents/edl-v10.php.
-For purposes of the EPL, "Program" will mean the Content.
-
-If you did not receive this Content directly from the Eclipse Foundation, the Content is
-being redistributed by another party ("Redistributor") and different terms and conditions may
-apply to your use of any object code in the Content. Check the Redistributor's license that was
-provided with the Content. If no such license exists, contact the Redistributor. Unless otherwise
-indicated below, the terms and conditions of the EPL still apply to any source code in the Content
-and such source code may be obtained at http://www.eclipse.org.
-
-Third Party Content
-
-The Content includes items that have been sourced from third parties as set out below. If you
-did not receive this Content directly from the Eclipse Foundation, the following is provided
-for informational purposes only, and you should look to the Redistributor's license for
-terms and conditions of use.
-
-None
- - - - diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/client.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/client.go deleted file mode 100644 index 1e5fd39a4..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/client.go +++ /dev/null @@ -1,517 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -// Package mqtt provides an MQTT v3.1.1 client library. -package mqtt - -import ( - "errors" - "fmt" - "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets" - "net" - "sync" - "time" -) - -// ClientInt is the interface definition for a Client as used by this -// library, the interface is primarily to allow mocking tests. -type ClientInt interface { - IsConnected() bool - Connect() Token - Disconnect(uint) - disconnect() - Publish(string, byte, bool, interface{}) Token - Subscribe(string, byte, MessageHandler) Token - SubscribeMultiple(map[string]byte, MessageHandler) Token - Unsubscribe(...string) Token -} - -// Client is an MQTT v3.1.1 client for communicating -// with an MQTT server using non-blocking methods that allow work -// to be done in the background. -// An application may connect to an MQTT server using: -// A plain TCP socket -// A secure SSL/TLS socket -// A websocket -// To enable ensured message delivery at Quality of Service (QoS) levels -// described in the MQTT spec, a message persistence mechanism must be -// used. This is done by providing a type which implements the Store -// interface. For convenience, FileStore and MemoryStore are provided -// implementations that should be sufficient for most use cases. More -// information can be found in their respective documentation. -// Numerous connection options may be specified by configuring a -// and then supplying a ClientOptions type. -type Client struct { - sync.RWMutex - messageIds - conn net.Conn - ibound chan packets.ControlPacket - obound chan *PacketAndToken - oboundP chan *PacketAndToken - msgRouter *router - stopRouter chan bool - incomingPubChan chan *packets.PublishPacket - errors chan error - stop chan struct{} - persist Store - options ClientOptions - lastContact lastcontact - pingOutstanding bool - connected bool - workers sync.WaitGroup -} - -// NewClient will create an MQTT v3.1.1 client with all of the options specified -// in the provided ClientOptions. The client must have the Start method called -// on it before it may be used. This is to make sure resources (such as a net -// connection) are created before the application is actually ready. 
-func NewClient(o *ClientOptions) *Client { - c := &Client{} - c.options = *o - - if c.options.Store == nil { - c.options.Store = NewMemoryStore() - } - switch c.options.ProtocolVersion { - case 3, 4: - c.options.protocolVersionExplicit = true - default: - c.options.ProtocolVersion = 4 - c.options.protocolVersionExplicit = false - } - c.persist = c.options.Store - c.connected = false - c.messageIds = messageIds{index: make(map[uint16]Token)} - c.msgRouter, c.stopRouter = newRouter() - c.msgRouter.setDefaultHandler(c.options.DefaultPublishHander) - return c -} - -// IsConnected returns a bool signifying whether -// the client is connected or not. -func (c *Client) IsConnected() bool { - c.RLock() - defer c.RUnlock() - return c.connected -} - -func (c *Client) setConnected(status bool) { - c.Lock() - defer c.Unlock() - c.connected = status -} - -//ErrNotConnected is the error returned from function calls that are -//made when the client is not connected to a broker -var ErrNotConnected = errors.New("Not Connected") - -// Connect will create a connection to the message broker -// If clean session is false, then a slice will -// be returned containing Receipts for all messages -// that were in-flight at the last disconnect. -// If clean session is true, then any existing client -// state will be removed. -func (c *Client) Connect() Token { - var err error - t := newToken(packets.Connect).(*ConnectToken) - DEBUG.Println(CLI, "Connect()") - - go func() { - var rc byte - cm := newConnectMsgFromOptions(&c.options) - - for _, broker := range c.options.Servers { - CONN: - DEBUG.Println(CLI, "about to write new connect msg") - c.conn, err = openConnection(broker, &c.options.TLSConfig, c.options.ConnectTimeout) - if err == nil { - DEBUG.Println(CLI, "socket connected to broker") - switch c.options.ProtocolVersion { - case 3: - DEBUG.Println(CLI, "Using MQTT 3.1 protocol") - cm.ProtocolName = "MQIsdp" - cm.ProtocolVersion = 3 - default: - DEBUG.Println(CLI, "Using MQTT 3.1.1 protocol") - c.options.ProtocolVersion = 4 - cm.ProtocolName = "MQTT" - cm.ProtocolVersion = 4 - } - cm.Write(c.conn) - - rc = c.connect() - if rc != packets.Accepted { - c.conn.Close() - c.conn = nil - //if the protocol version was explicitly set don't do any fallback - if c.options.protocolVersionExplicit { - ERROR.Println(CLI, "Connecting to", broker, "CONNACK was not CONN_ACCEPTED, but rather", packets.ConnackReturnCodes[rc]) - continue - } - if c.options.ProtocolVersion == 4 { - DEBUG.Println(CLI, "Trying reconnect using MQTT 3.1 protocol") - c.options.ProtocolVersion = 3 - goto CONN - } - } - break - } else { - ERROR.Println(CLI, err.Error()) - WARN.Println(CLI, "failed to connect to broker, trying next") - rc = packets.ErrNetworkError - } - } - - if c.conn == nil { - ERROR.Println(CLI, "Failed to connect to a broker") - t.returnCode = rc - if rc != packets.ErrNetworkError { - t.err = packets.ConnErrors[rc] - } else { - t.err = fmt.Errorf("%s : %s", packets.ConnErrors[rc], err) - } - t.flowComplete() - return - } - - c.lastContact.update() - c.persist.Open() - - c.obound = make(chan *PacketAndToken, 100) - c.oboundP = make(chan *PacketAndToken, 100) - c.ibound = make(chan packets.ControlPacket) - c.errors = make(chan error) - c.stop = make(chan struct{}) - - c.incomingPubChan = make(chan *packets.PublishPacket, 100) - c.msgRouter.matchAndDispatch(c.incomingPubChan, c.options.Order, c) - - c.workers.Add(1) - go outgoing(c) - go alllogic(c) - - c.connected = true - DEBUG.Println(CLI, "client is connected") - if 
c.options.OnConnect != nil { - go c.options.OnConnect(c) - } - - if c.options.KeepAlive != 0 { - c.workers.Add(1) - go keepalive(c) - } - - // Take care of any messages in the store - //var leftovers []Receipt - if c.options.CleanSession == false { - //leftovers = c.resume() - } else { - c.persist.Reset() - } - - // Do not start incoming until resume has completed - c.workers.Add(1) - go incoming(c) - - DEBUG.Println(CLI, "exit startClient") - t.flowComplete() - }() - return t -} - -// internal function used to reconnect the client when it loses its connection -func (c *Client) reconnect() { - DEBUG.Println(CLI, "enter reconnect") - var rc byte = 1 - var sleep uint = 1 - var err error - - for rc != 0 { - cm := newConnectMsgFromOptions(&c.options) - - for _, broker := range c.options.Servers { - CONN: - DEBUG.Println(CLI, "about to write new connect msg") - c.conn, err = openConnection(broker, &c.options.TLSConfig, c.options.ConnectTimeout) - if err == nil { - DEBUG.Println(CLI, "socket connected to broker") - switch c.options.ProtocolVersion { - case 3: - DEBUG.Println(CLI, "Using MQTT 3.1 protocol") - cm.ProtocolName = "MQIsdp" - cm.ProtocolVersion = 3 - default: - DEBUG.Println(CLI, "Using MQTT 3.1.1 protocol") - c.options.ProtocolVersion = 4 - cm.ProtocolName = "MQTT" - cm.ProtocolVersion = 4 - } - cm.Write(c.conn) - - rc = c.connect() - if rc != packets.Accepted { - c.conn.Close() - c.conn = nil - //if the protocol version was explicitly set don't do any fallback - if c.options.protocolVersionExplicit { - ERROR.Println(CLI, "Connecting to", broker, "CONNACK was not Accepted, but rather", packets.ConnackReturnCodes[rc]) - continue - } - if c.options.ProtocolVersion == 4 { - DEBUG.Println(CLI, "Trying reconnect using MQTT 3.1 protocol") - c.options.ProtocolVersion = 3 - goto CONN - } - } - break - } else { - ERROR.Println(CLI, err.Error()) - WARN.Println(CLI, "failed to connect to broker, trying next") - rc = packets.ErrNetworkError - } - } - if rc != 0 { - DEBUG.Println(CLI, "Reconnect failed, sleeping for", sleep, "seconds") - time.Sleep(time.Duration(sleep) * time.Second) - if sleep <= uint(c.options.MaxReconnectInterval.Seconds()) { - sleep *= 2 - } - } - } - - c.lastContact.update() - c.stop = make(chan struct{}) - - c.workers.Add(1) - go outgoing(c) - go alllogic(c) - - c.setConnected(true) - DEBUG.Println(CLI, "client is reconnected") - if c.options.OnConnect != nil { - go c.options.OnConnect(c) - } - - if c.options.KeepAlive != 0 { - c.workers.Add(1) - go keepalive(c) - } - c.workers.Add(1) - go incoming(c) -} - -// This function is only used for receiving a connack -// when the connection is first started. -// This prevents receiving incoming data while resume -// is in progress if clean session is false. -func (c *Client) connect() byte { - DEBUG.Println(NET, "connect started") - - ca, err := packets.ReadPacket(c.conn) - if err != nil { - ERROR.Println(NET, "connect got error", err) - //c.errors <- err - return packets.ErrNetworkError - } - msg := ca.(*packets.ConnackPacket) - - if msg == nil || msg.FixedHeader.MessageType != packets.Connack { - ERROR.Println(NET, "received msg that was nil or not CONNACK") - } else { - DEBUG.Println(NET, "received connack") - } - return msg.ReturnCode -} - -// Disconnect will end the connection with the server, but not before waiting -// the specified number of milliseconds to wait for existing work to be -// completed. 
-func (c *Client) Disconnect(quiesce uint) { - if !c.IsConnected() { - WARN.Println(CLI, "already disconnected") - return - } - DEBUG.Println(CLI, "disconnecting") - c.setConnected(false) - - dm := packets.NewControlPacket(packets.Disconnect).(*packets.DisconnectPacket) - dt := newToken(packets.Disconnect) - c.oboundP <- &PacketAndToken{p: dm, t: dt} - - // wait for work to finish, or quiesce time consumed - dt.WaitTimeout(time.Duration(quiesce) * time.Millisecond) - c.disconnect() -} - -// ForceDisconnect will end the connection with the mqtt broker immediately. -func (c *Client) forceDisconnect() { - if !c.IsConnected() { - WARN.Println(CLI, "already disconnected") - return - } - c.setConnected(false) - c.conn.Close() - DEBUG.Println(CLI, "forcefully disconnecting") - c.disconnect() -} - -func (c *Client) internalConnLost(err error) { - close(c.stop) - c.conn.Close() - c.workers.Wait() - if c.IsConnected() { - if c.options.OnConnectionLost != nil { - go c.options.OnConnectionLost(c, err) - } - if c.options.AutoReconnect { - go c.reconnect() - } else { - c.setConnected(false) - } - } -} - -func (c *Client) disconnect() { - select { - case <-c.stop: - //someone else has already closed the channel, must be error - default: - close(c.stop) - } - c.conn.Close() - c.workers.Wait() - close(c.stopRouter) - DEBUG.Println(CLI, "disconnected") - c.persist.Close() -} - -// Publish will publish a message with the specified QoS -// and content to the specified topic. -// Returns a read only channel used to track -// the delivery of the message. -func (c *Client) Publish(topic string, qos byte, retained bool, payload interface{}) Token { - token := newToken(packets.Publish).(*PublishToken) - DEBUG.Println(CLI, "enter Publish") - if !c.IsConnected() { - token.err = ErrNotConnected - token.flowComplete() - return token - } - pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) - pub.Qos = qos - pub.TopicName = topic - pub.Retain = retained - switch payload.(type) { - case string: - pub.Payload = []byte(payload.(string)) - case []byte: - pub.Payload = payload.([]byte) - default: - token.err = errors.New("Unknown payload type") - token.flowComplete() - return token - } - - DEBUG.Println(CLI, "sending publish message, topic:", topic) - c.obound <- &PacketAndToken{p: pub, t: token} - return token -} - -// Subscribe starts a new subscription. Provide a MessageHandler to be executed when -// a message is published on the topic provided. -func (c *Client) Subscribe(topic string, qos byte, callback MessageHandler) Token { - token := newToken(packets.Subscribe).(*SubscribeToken) - DEBUG.Println(CLI, "enter Subscribe") - if !c.IsConnected() { - token.err = ErrNotConnected - token.flowComplete() - return token - } - sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket) - if err := validateTopicAndQos(topic, qos); err != nil { - token.err = err - return token - } - sub.Topics = append(sub.Topics, topic) - sub.Qoss = append(sub.Qoss, qos) - DEBUG.Println(sub.String()) - - if callback != nil { - c.msgRouter.addRoute(topic, callback) - } - - token.subs = append(token.subs, topic) - c.oboundP <- &PacketAndToken{p: sub, t: token} - DEBUG.Println(CLI, "exit Subscribe") - return token -} - -// SubscribeMultiple starts a new subscription for multiple topics. Provide a MessageHandler to -// be executed when a message is published on one of the topics provided. 
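For reference (not part of the deleted file), the Connect/Publish/Subscribe/Disconnect flow documented in the comments above composes roughly as follows. This is a sketch only: it assumes the NewClientOptions/AddBroker option helpers and the token Wait()/Error() idiom from this era of the library, and the broker address and topic are hypothetical. The SubscribeMultiple implementation continues after the sketch.

```go
package main

import (
	"fmt"

	MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)

func main() {
	// Assumed option helpers; broker address is hypothetical.
	opts := MQTT.NewClientOptions().AddBroker("tcp://localhost:1883")
	c := MQTT.NewClient(opts)

	// Connect returns a Token; Wait blocks until the flow completes.
	if t := c.Connect(); t.Wait() && t.Error() != nil {
		panic(t.Error())
	}

	// Subscribe at QoS 1 with a per-subscription handler (hypothetical topic).
	c.Subscribe("example/topic", 1, func(client *MQTT.Client, msg MQTT.Message) {
		fmt.Printf("%s: %s\n", msg.Topic(), msg.Payload())
	}).Wait()

	// Publish a QoS 0, non-retained message and wait for the send to complete.
	c.Publish("example/topic", 0, false, "hello").Wait()

	// Allow up to 250 ms for outstanding work, then tear down.
	c.Disconnect(250)
}
```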
-func (c *Client) SubscribeMultiple(filters map[string]byte, callback MessageHandler) Token { - var err error - token := newToken(packets.Subscribe).(*SubscribeToken) - DEBUG.Println(CLI, "enter SubscribeMultiple") - if !c.IsConnected() { - token.err = ErrNotConnected - token.flowComplete() - return token - } - sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket) - if sub.Topics, sub.Qoss, err = validateSubscribeMap(filters); err != nil { - token.err = err - return token - } - - if callback != nil { - for topic := range filters { - c.msgRouter.addRoute(topic, callback) - } - } - token.subs = make([]string, len(sub.Topics)) - copy(token.subs, sub.Topics) - c.oboundP <- &PacketAndToken{p: sub, t: token} - DEBUG.Println(CLI, "exit SubscribeMultiple") - return token -} - -// Unsubscribe will end the subscription from each of the topics provided. -// Messages published to those topics from other clients will no longer be -// received. -func (c *Client) Unsubscribe(topics ...string) Token { - token := newToken(packets.Unsubscribe).(*UnsubscribeToken) - DEBUG.Println(CLI, "enter Unsubscribe") - if !c.IsConnected() { - token.err = ErrNotConnected - token.flowComplete() - return token - } - unsub := packets.NewControlPacket(packets.Unsubscribe).(*packets.UnsubscribePacket) - unsub.Topics = make([]string, len(topics)) - copy(unsub.Topics, topics) - - c.oboundP <- &PacketAndToken{p: unsub, t: token} - for _, topic := range topics { - c.msgRouter.deleteRoute(topic) - } - - DEBUG.Println(CLI, "exit Unsubscribe") - return token -} - -//DefaultConnectionLostHandler is a definition of a function that simply -//reports to the DEBUG log the reason for the client losing a connection. -func DefaultConnectionLostHandler(client *Client, reason error) { - DEBUG.Println("Connection lost:", reason.Error()) -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/components.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/components.go deleted file mode 100644 index 01f5fafdf..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/components.go +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -type component string - -// Component names for debug output -const ( - NET component = "[net] " - PNG component = "[pinger] " - CLI component = "[client] " - DEC component = "[decode] " - MES component = "[message] " - STR component = "[store] " - MID component = "[msgids] " - TST component = "[test] " - STA component = "[state] " - ERR component = "[error] " -) diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/edl-v10 b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/edl-v10 deleted file mode 100644 index cf989f145..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/edl-v10 +++ /dev/null @@ -1,15 +0,0 @@ - -Eclipse Distribution License - v 1.0 - -Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors. - -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/epl-v10 b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/epl-v10 deleted file mode 100644 index 79e486c3d..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/epl-v10 +++ /dev/null @@ -1,70 +0,0 @@ -Eclipse Public License - v 1.0 - -THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. - -1. DEFINITIONS - -"Contribution" means: - -a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and -b) in the case of each subsequent Contributor: -i) changes to the Program, and -ii) additions to the Program; -where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program. -"Contributor" means any person or entity that distributes the Program. - -"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. - -"Program" means the Contributions distributed in accordance with this Agreement. - -"Recipient" means anyone who receives the Program under this Agreement, including all Contributors. - -2. 
GRANT OF RIGHTS - -a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form. -b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder. -c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program. -d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement. -3. REQUIREMENTS - -A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that: - -a) it complies with the terms and conditions of this Agreement; and -b) its license agreement: -i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; -ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; -iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and -iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange. -When the Program is made available in source code form: - -a) it must be made available under this Agreement; and -b) a copy of this Agreement must be included with each copy of the Program. -Contributors may not remove or alter any copyright notices contained within the Program. 
- -Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution. - -4. COMMERCIAL DISTRIBUTION - -Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense. - -For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages. - -5. NO WARRANTY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. - -6. 
DISCLAIMER OF LIABILITY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -7. GENERAL - -If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. - -If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed. - -All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive. - -Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved. - -This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation. 
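For orientation, the subscription API deleted above (SubscribeMultiple, Unsubscribe, and DefaultConnectionLostHandler) was typically driven as follows. This is a minimal sketch rather than code from the repository; the broker address, topic filters, and handler body are illustrative assumptions:

    opts := NewClientOptions().
        AddBroker("tcp://localhost:1883"). // assumed local broker
        SetClientID("sketch").
        SetConnectionLostHandler(DefaultConnectionLostHandler)
    c := NewClient(opts)
    if token := c.Connect(); token.Wait() && token.Error() != nil {
        // handle the connection error
    }
    // One callback serves every filter passed to SubscribeMultiple.
    filters := map[string]byte{"sensors/+/temp": 0, "alerts/#": 1}
    var handler MessageHandler = func(client *Client, msg Message) {
        fmt.Printf("%s: %s\n", msg.Topic(), msg.Payload())
    }
    if token := c.SubscribeMultiple(filters, handler); token.Wait() && token.Error() != nil {
        // handle the subscribe error
    }
    // Unsubscribe also deletes the message routes, so the callback stops firing.
    c.Unsubscribe("sensors/+/temp", "alerts/#")
    c.Disconnect(250)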
diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/filestore.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/filestore.go
deleted file mode 100644
index c4a0c8b91..000000000
--- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/filestore.go
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
- * Copyright (c) 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * which accompanies this distribution, and is available at
- * http://www.eclipse.org/legal/epl-v10.html
- *
- * Contributors:
- *    Seth Hoenig
- *    Allan Stockdill-Mander
- *    Mike Robertson
- */
-
-package mqtt
-
-import (
-	"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
-	"io"
-	"io/ioutil"
-	"os"
-	"path"
-	"sync"
-)
-
-const (
-	msgExt = ".msg"
-	bkpExt = ".bkp"
-)
-
-// FileStore implements the store interface using the filesystem to provide
-// true persistence, even across client failure. This is designed to use a
-// single directory per running client. If you are running multiple clients
-// on the same filesystem, you will need to be careful to specify unique
-// store directories for each.
-type FileStore struct {
-	sync.RWMutex
-	directory string
-	opened    bool
-}
-
-// NewFileStore will create a new FileStore which stores its messages in the
-// directory provided.
-func NewFileStore(directory string) *FileStore {
-	store := &FileStore{
-		directory: directory,
-		opened:    false,
-	}
-	return store
-}
-
-// Open will allow the FileStore to be used.
-func (store *FileStore) Open() {
-	store.Lock()
-	defer store.Unlock()
-	// if no store directory was specified in ClientOpts, by default use the
-	// current working directory
-	if store.directory == "" {
-		store.directory, _ = os.Getwd()
-	}
-
-	// if store dir exists, great, otherwise, create it
-	if !exists(store.directory) {
-		perms := os.FileMode(0770)
-		merr := os.MkdirAll(store.directory, perms)
-		chkerr(merr)
-	}
-	store.opened = true
-	DEBUG.Println(STR, "store is opened at", store.directory)
-}
-
-// Close will disallow the FileStore from being used.
-func (store *FileStore) Close() {
-	store.Lock()
-	defer store.Unlock()
-	store.opened = false
-	DEBUG.Println(STR, "store is closed") // log the closure rather than a misleading "store is not open" warning
-}
-
-// Put will put a message into the store, associated with the provided
-// key value.
-func (store *FileStore) Put(key string, m packets.ControlPacket) {
-	store.Lock()
-	defer store.Unlock()
-	chkcond(store.opened)
-	full := fullpath(store.directory, key)
-	if exists(full) {
-		backup(store.directory, key) // make a copy of what already exists
-		defer unbackup(store.directory, key)
-	}
-	write(store.directory, key, m)
-	chkcond(exists(full))
-}
-
-// Get will retrieve a message from the store, the one associated with
-// the provided key value.
-func (store *FileStore) Get(key string) packets.ControlPacket {
-	store.RLock()
-	defer store.RUnlock()
-	chkcond(store.opened)
-	filepath := fullpath(store.directory, key)
-	if !exists(filepath) {
-		return nil
-	}
-	mfile, oerr := os.Open(filepath)
-	chkerr(oerr)
-	msg, rerr := packets.ReadPacket(mfile)
-	chkerr(rerr)
-	cerr := mfile.Close()
-	chkerr(cerr)
-	return msg
-}
-
-// All will provide a list of all of the keys associated with messages
-// currently residing in the FileStore.
-func (store *FileStore) All() []string {
-	store.RLock()
-	defer store.RUnlock()
-	return store.all()
-}
-
-// Del will remove the persisted message associated with the provided
-// key from the FileStore.
-func (store *FileStore) Del(key string) {
-	store.Lock()
-	defer store.Unlock()
-	store.del(key)
-}
-
-// Reset will remove all persisted messages from the FileStore.
-func (store *FileStore) Reset() {
-	store.Lock()
-	defer store.Unlock()
-	WARN.Println(STR, "FileStore Reset")
-	for _, key := range store.all() {
-		store.del(key)
-	}
-}
-
-// lockless
-func (store *FileStore) all() []string {
-	chkcond(store.opened)
-	keys := []string{}
-	files, rderr := ioutil.ReadDir(store.directory)
-	chkerr(rderr)
-	for _, f := range files {
-		DEBUG.Println(STR, "file in All():", f.Name())
-		key := f.Name()[0 : len(f.Name())-4] // remove file extension
-		keys = append(keys, key)
-	}
-	return keys
-}
-
-// lockless
-func (store *FileStore) del(key string) {
-	chkcond(store.opened)
-	DEBUG.Println(STR, "store del filepath:", store.directory)
-	DEBUG.Println(STR, "store delete key:", key)
-	filepath := fullpath(store.directory, key)
-	DEBUG.Println(STR, "path of deletion:", filepath)
-	if !exists(filepath) {
-		WARN.Println(STR, "store could not delete key:", key)
-		return
-	}
-	rerr := os.Remove(filepath)
-	chkerr(rerr)
-	DEBUG.Println(STR, "del msg:", key)
-	chkcond(!exists(filepath))
-}
-
-func fullpath(store string, key string) string {
-	p := path.Join(store, key+msgExt)
-	return p
-}
-
-func bkppath(store string, key string) string {
-	p := path.Join(store, key+bkpExt)
-	return p
-}
-
-// create a file called "X.[messageid].msg" located in the store;
-// the contents of the file are the bytes of the message.
-// if a message with m's message id already exists, it will
-// be overwritten.
-// X will be 'i' for inbound messages, and 'o' for outbound messages
-func write(store, key string, m packets.ControlPacket) {
-	filepath := fullpath(store, key)
-	f, err := os.Create(filepath)
-	chkerr(err)
-	werr := m.Write(f)
-	chkerr(werr)
-	cerr := f.Close()
-	chkerr(cerr)
-}
-
-func exists(file string) bool {
-	if _, err := os.Stat(file); err != nil {
-		if os.IsNotExist(err) {
-			return false
-		}
-		chkerr(err)
-	}
-	return true
-}
-
-func backup(store, key string) {
-	bkpp := bkppath(store, key)
-	fulp := fullpath(store, key)
-	backup, err := os.Create(bkpp)
-	chkerr(err)
-	mfile, oerr := os.Open(fulp)
-	chkerr(oerr)
-	_, cerr := io.Copy(backup, mfile)
-	chkerr(cerr)
-	clberr := backup.Close()
-	chkerr(clberr)
-	clmerr := mfile.Close()
-	chkerr(clmerr)
-}
-
-// Identify .bkp files in the store and turn them into .msg files,
-// overwriting any existing .msg file of the same name. The Paho Java
-// client restores backups the same way, so this is considered safe.
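// The .bkp files exist to make Put crash-safe: Put first copies an existing
// key.msg to key.bkp (backup), writes the new key.msg, and only then removes
// the .bkp (unbackup). A .bkp found here therefore means a write was
// interrupted, and copying it back over the .msg recovers the last message
// that was stored completely.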
-func restore(store string) {
-	files, rderr := ioutil.ReadDir(store)
-	chkerr(rderr)
-	for _, f := range files {
-		fname := f.Name()
-		if len(fname) > 4 {
-			if fname[len(fname)-4:] == bkpExt {
-				key := fname[0 : len(fname)-4]
-				fulp := fullpath(store, key)
-				msg, cerr := os.Create(fulp)
-				chkerr(cerr)
-				bkpp := path.Join(store, fname)
-				bkp, oerr := os.Open(bkpp)
-				chkerr(oerr)
-				n, cerr := io.Copy(msg, bkp)
-				chkerr(cerr)
-				chkcond(n > 0)
-				clmerr := msg.Close()
-				chkerr(clmerr)
-				clberr := bkp.Close()
-				chkerr(clberr)
-				remerr := os.Remove(bkpp)
-				chkerr(remerr)
-			}
-		}
-	}
-}
-
-func unbackup(store, key string) {
-	bkpp := bkppath(store, key)
-	remerr := os.Remove(bkpp)
-	chkerr(remerr)
-}
diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/README.md b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/README.md
deleted file mode 100644
index 17790426f..000000000
--- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/README.md
+++ /dev/null
@@ -1,74 +0,0 @@
-FVT Instructions
-================
-
-The FVT tests are currently only supported by [IBM MessageSight](http://www-03.ibm.com/software/products/us/en/messagesight/).
-
-Support for [mosquitto](http://mosquitto.org/) and [IBM Really Small Message Broker](https://www.ibm.com/developerworks/community/groups/service/html/communityview?communityUuid=d5bedadd-e46f-4c97-af89-22d65ffee070) might be added in the future.
-
-
-IBM MessageSight Configuration
-------------------------------
-
-The IBM MessageSight Virtual Appliance can be downloaded here:
-[Download](http://www-933.ibm.com/support/fixcentral/swg/selectFixes?parent=ibm~Other+software&product=ibm/Other+software/MessageSight&function=fixId&fixids=1.0.0.1-IMA-DeveloperImage&includeSupersedes=0 "IBM MessageSight")
-
-There is a nice blog post about it here:
-[Blog](https://www.ibm.com/developerworks/community/blogs/c565c720-fe84-4f63-873f-607d87787327/entry/ibm_messagesight_for_developers_is_here?lang=en "Blog")
-
-The virtual appliance must be installed into a virtual machine like
-Oracle VirtualBox or VMWare Player. (Follow the instructions that come
-with the download.)
-
-Next, copy your authorized keys (basically a file containing the public
-rsa key of your own computer) onto the appliance to enable passwordless ssh.
-
-For example,
-
-    Console> user sshkey add "scp://user@host:~/.ssh/authorized_keys"
-
-More information can be found in the IBM MessageSight InfoCenter:
-[InfoCenter](https://infocenters.hursley.ibm.com/ism/v1/help/index.jsp "InfoCenter")
-
-Now, execute the script setup_IMA.sh to create the objects necessary
-to configure the server for the unit test cases provided.
-
-For example,
-
-    ./setup_IMA.sh
-
-You should now be able to view the objects on your server:
-
-    Console> imaserver show Endpoint Name=GoMqttEP1
-    Name = GoMqttEP1
-    Enabled = True
-    Port = 17001
-    Protocol = MQTT
-    Interface = all
-    SecurityProfile =
-    ConnectionPolicies = GoMqttCP1
-    MessagingPolicies = GoMqttMP1
-    MaxMessageSize = 1024KB
-    MessageHub = GoMqttTestHub
-    Description =
-
-
-
-RSMB Configuration
-------------------
-Wait for SSL support?
-
-
-Mosquitto Configuration
------------------------
-Launch mosquitto from the fvt directory, specifying mosquitto.cfg as the config file
-
-``ex: /usr/bin/mosquitto -c ./mosquitto.cfg``
-
-Note: Mosquitto requires TLS 1.1 or better, while Go 1.1.2 supports
-only TLS 1.0. However, Go 1.2+ supports TLS 1.1 and TLS 1.2.
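-A quick way to confirm a listener is actually up before running the tests is
-the standard mosquitto_sub client (assuming the mosquitto clients are
-installed; host, port, and topic below match the config shipped here):
-
-    mosquitto_sub -h 127.0.0.1 -p 17001 -t '#' -v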
-
-
-Other Notes
------------
-Go 1.1.2 does not support intermediate certificates; however, Go 1.2+ does.
diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/mosquitto.cfg b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/mosquitto.cfg
deleted file mode 100644
index cddb94f31..000000000
--- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/mosquitto.cfg
+++ /dev/null
@@ -1,17 +0,0 @@
-allow_anonymous true
-allow_duplicate_messages false
-connection_messages true
-log_dest stdout
-log_timestamp true
-log_type all
-persistence false
-bind_address 127.0.0.1
-
-listener 17001
-listener 17002
-listener 17003
-listener 17004
-
-#capath ../samples/samplecerts
-#certfile ../samples/samplecerts/server-crt.pem
-#keyfile ../samples/samplecerts/server-key.pem
diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/rsmb.cfg b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/rsmb.cfg
deleted file mode 100644
index 1dd77547b..000000000
--- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/rsmb.cfg
+++ /dev/null
@@ -1,8 +0,0 @@
-allow_anonymous false
-bind_address 127.0.0.1
-connection_messages true
-log_level detail
-
-listener 17001
-#listener 17003
-#listener 17004
\ No newline at end of file
diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/setup_IMA.sh b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/setup_IMA.sh
deleted file mode 100644
index 6ebdda3c2..000000000
--- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/setup_IMA.sh
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/bin/bash
-
-#######################################################################
-# This script is for configuring your IBM Messaging Appliance for use #
-# as an mqtt test server for testing the go-mqtt open source client.  #
-# It creates the Policies and Endpoints necessary to test particular  #
-# features of the client, such as IPv6, SSL, and other things         #
-#                                                                     #
-# You do not need this script for any other purpose.                  #
-#######################################################################
-
-# Edit options to match your configuration
-IMA_HOST=9.41.55.184
-IMA_USER=admin
-HOST=9.41.55.146
-USER=root
-CERTDIR=~/GO/src/github.com/shoenig/go-mqtt/samples/samplecerts
-
-echo 'Configuring your IBM Messaging Appliance for testing go-mqtt'
-echo 'IMA_HOST: ' $IMA_HOST
-
-
-function ima {
-	reply=`ssh $IMA_USER@$IMA_HOST imaserver $@`
-}
-
-function imp {
-	reply=`ssh $IMA_USER@$IMA_HOST file get $@`
-}
-
-ima create MessageHub Name=GoMqttTestHub
-
-# Config "1" is a basic, open endpoint, port 17001
-ima create MessagingPolicy \
-	Name=GoMqttMP1 \
-	Protocol=MQTT \
-	ActionList=Publish,Subscribe \
-	MaxMessages=100000 \
-	DestinationType=Topic \
-	Destination=*
-
-ima create ConnectionPolicy \
-	Name=GoMqttCP1 \
-	Protocol=MQTT
-
-ima create Endpoint \
-	Name=GoMqttEP1 \
-	Protocol=MQTT \
-	MessageHub=GoMqttTestHub \
-	ConnectionPolicies=GoMqttCP1 \
-	MessagingPolicies=GoMqttMP1 \
-	Port=17001
-
-# Config "2" is IPv6 only, port 17002
-
-# Config "3" is for authorization failures, port 17003
-ima create ConnectionPolicy \
-	Name=GoMqttCP2 \
-	Protocol=MQTT \
-	ClientID=GoMqttClient
-
-ima create Endpoint \
-	Name=GoMqttEP3 \
-	Protocol=MQTT \
-	MessageHub=GoMqttTestHub \
-	ConnectionPolicies=GoMqttCP2 \
-	MessagingPolicies=GoMqttMP1 \
-	Port=17003
-
-# Config "4" is secure connections, port 17004
-imp scp://$USER@$HOST:${CERTDIR}/server-crt.pem .
-imp scp://$USER@$HOST:${CERTDIR}/server-key.pem .
-imp scp://$USER@$HOST:${CERTDIR}/rootCA-crt.pem .
-imp scp://$USER@$HOST:${CERTDIR}/intermediateCA-crt.pem .
-
-ima apply Certificate \
-	CertFileName=server-crt.pem \
-	"CertFilePassword=" \
-	KeyFileName=server-key.pem \
-	"KeyFilePassword="
-
-ima create CertificateProfile \
-	Name=GoMqttCertProf \
-	Certificate=server-crt.pem \
-	Key=server-key.pem
-
-ima create SecurityProfile \
-	Name=GoMqttSecProf \
-	MinimumProtocolMethod=SSLv3 \
-	UseClientCertificate=True \
-	UsePasswordAuthentication=False \
-	Ciphers=Fast \
-	CertificateProfile=GoMqttCertProf
-
-ima apply Certificate \
-	TrustedCertificate=rootCA-crt.pem \
-	SecurityProfileName=GoMqttSecProf
-
-ima apply Certificate \
-	TrustedCertificate=intermediateCA-crt.pem \
-	SecurityProfileName=GoMqttSecProf
-
-ima create Endpoint \
-	Name=GoMqttEP4 \
-	Port=17004 \
-	MessageHub=GoMqttTestHub \
-	ConnectionPolicies=GoMqttCP1 \
-	MessagingPolicies=GoMqttMP1 \
-	SecurityProfile=GoMqttSecProf \
-	Protocol=MQTT
-
diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt_client_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt_client_test.go
deleted file mode 100644
index 8a914ceab..000000000
--- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt_client_test.go
+++ /dev/null
@@ -1,1007 +0,0 @@
-/*
- * Copyright (c) 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * which accompanies this distribution, and is available at
- * http://www.eclipse.org/legal/epl-v10.html
- *
- * Contributors:
- *    Seth Hoenig
- *    Allan Stockdill-Mander
- *    Mike Robertson
- */
-
-package mqtt
-
-import "fmt"
-import "time"
-import "bytes"
-
-import "io/ioutil"
-import "crypto/tls"
-import "crypto/x509"
-import "testing"
-
-func Test_Start(t *testing.T) {
-	ops := NewClientOptions().SetClientID("Start").
-		AddBroker(FVTTCP).
-		SetStore(NewFileStore("/tmp/fvt/Start"))
-	c := NewClient(ops)
-
-	token := c.Connect()
-	if token.Wait() && token.Error() != nil {
-		t.Fatalf("Error on Client.Connect(): %v", token.Error())
-	}
-
-	c.Disconnect(250)
-}
-
-/* uncomment this if you have a connection policy disallowing FailClientID
-func Test_InvalidConnRc(t *testing.T) {
-	ops := NewClientOptions().SetClientID("FailClientID").
-		AddBroker("tcp://" + FVTAddr + ":17003").
-		SetStore(NewFileStore("/tmp/fvt/InvalidConnRc"))
-
-	c := NewClient(ops)
-	token := c.Connect()
-	if token.Wait() && token.Error() != ErrNotAuthorized {
-		t.Fatalf("Did not receive error as expected, got %v", token.Error())
-	}
-	c.Disconnect(250)
-}
-*/
-
-// Helper function for Test_Start_Ssl
-func NewTLSConfig() *tls.Config {
-	certpool := x509.NewCertPool()
-	pemCerts, err := ioutil.ReadFile("samples/samplecerts/CAfile.pem")
-	if err == nil {
-		certpool.AppendCertsFromPEM(pemCerts)
-	}
-
-	cert, err := tls.LoadX509KeyPair("samples/samplecerts/client-crt.pem", "samples/samplecerts/client-key.pem")
-	if err != nil {
-		panic(err)
-	}
-
-	return &tls.Config{
-		RootCAs:            certpool,
-		ClientAuth:         tls.NoClientCert,
-		ClientCAs:          nil,
-		InsecureSkipVerify: true,
-		Certificates:       []tls.Certificate{cert},
-	}
-}
-
-/* uncomment this if you have ssl setup
-func Test_Start_Ssl(t *testing.T) {
-	tlsconfig := NewTLSConfig()
-	ops := NewClientOptions().SetClientID("StartSsl").
-		AddBroker(FVTSSL).
-		SetStore(NewFileStore("/tmp/fvt/Start_Ssl")).
-		SetTlsConfig(tlsconfig)
-
-	c := NewClient(ops)
-
-	token := c.Connect()
-	if token.Wait() && token.Error() != nil {
-		t.Fatalf("Error on Client.Connect(): %v", token.Error())
-	}
-
-	c.Disconnect(250)
-}
-*/
-
-func Test_Publish_1(t *testing.T) {
-	ops := NewClientOptions()
-	ops.AddBroker(FVTTCP)
-	ops.SetClientID("Publish_1")
-	ops.SetStore(NewFileStore("/tmp/fvt/Publish_1"))
-
-	c := NewClient(ops)
-	token := c.Connect()
-	if token.Wait() && token.Error() != nil {
-		t.Fatalf("Error on Client.Connect(): %v", token.Error())
-	}
-
-	c.Publish("test/Publish", 0, false, "Publish qos0")
-
-	c.Disconnect(250)
-}
-
-func Test_Publish_2(t *testing.T) {
-	ops := NewClientOptions()
-	ops.AddBroker(FVTTCP)
-	ops.SetClientID("Publish_2")
-	ops.SetStore(NewFileStore("/tmp/fvt/Publish_2"))
-
-	c := NewClient(ops)
-	token := c.Connect()
-	if token.Wait() && token.Error() != nil {
-		t.Fatalf("Error on Client.Connect(): %v", token.Error())
-	}
-
-	c.Publish("/test/Publish", 0, false, "Publish1 qos0")
-	c.Publish("/test/Publish", 0, false, "Publish2 qos0")
-
-	c.Disconnect(250)
-}
-
-func Test_Publish_3(t *testing.T) {
-	ops := NewClientOptions()
-	ops.AddBroker(FVTTCP)
-	ops.SetClientID("Publish_3")
-	ops.SetStore(NewFileStore("/tmp/fvt/Publish_3"))
-
-	c := NewClient(ops)
-	token := c.Connect()
-	if token.Wait() && token.Error() != nil {
-		t.Fatalf("Error on Client.Connect(): %v", token.Error())
-	}
-
-	c.Publish("/test/Publish", 0, false, "Publish1 qos0")
-	c.Publish("/test/Publish", 1, false, "Publish2 qos1")
-	c.Publish("/test/Publish", 2, false, "Publish3 qos2")
-
-	c.Disconnect(250)
-}
-
-func Test_Subscribe(t *testing.T) {
-	pops := NewClientOptions()
-	pops.AddBroker(FVTTCP)
-	pops.SetClientID("Subscribe_tx")
-	pops.SetStore(NewFileStore("/tmp/fvt/Subscribe/p"))
-	p := NewClient(pops)
-
-	sops := NewClientOptions()
-	sops.AddBroker(FVTTCP)
-	sops.SetClientID("Subscribe_rx")
-	sops.SetStore(NewFileStore("/tmp/fvt/Subscribe/s"))
-	var f MessageHandler = func(client *Client, msg Message) {
-		fmt.Printf("TOPIC: %s\n", msg.Topic())
-		fmt.Printf("MSG: %s\n", msg.Payload())
-	}
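// A handler installed with SetDefaultPublishHandler receives messages from
// subscriptions that were made with a nil callback, as the Subscribe calls
// in these tests are.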
sops.SetDefaultPublishHandler(f) - s := NewClient(sops) - - sToken := s.Connect() - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) - } - - s.Subscribe("/test/sub", 0, nil) - - pToken := p.Connect() - if pToken.Wait() && pToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) - } - - p.Publish("/test/sub", 0, false, "Publish qos0") - - p.Disconnect(250) - s.Disconnect(250) -} - -func Test_Will(t *testing.T) { - willmsgc := make(chan string) - - sops := NewClientOptions().AddBroker(FVTTCP) - sops.SetClientID("will-giver") - sops.SetWill("/wills", "good-byte!", 0, false) - sops.SetConnectionLostHandler(func(client *Client, err error) { - fmt.Println("OnConnectionLost!") - }) - c := NewClient(sops) - - wops := NewClientOptions() - wops.AddBroker(FVTTCP) - wops.SetClientID("will-subscriber") - wops.SetStore(NewFileStore("/tmp/fvt/Will")) - wops.SetDefaultPublishHandler(func(client *Client, msg Message) { - fmt.Printf("TOPIC: %s\n", msg.Topic()) - fmt.Printf("MSG: %s\n", msg.Payload()) - willmsgc <- string(msg.Payload()) - }) - wsub := NewClient(wops) - - wToken := wsub.Connect() - if wToken.Wait() && wToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", wToken.Error()) - } - - wsub.Subscribe("/wills", 0, nil) - - token := c.Connect() - if token.Wait() && token.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", token.Error()) - } - time.Sleep(time.Duration(1) * time.Second) - - c.forceDisconnect() - - wsub.Disconnect(250) - - if <-willmsgc != "good-byte!" { - t.Fatalf("will message did not have correct payload") - } -} - -func Test_Binary_Will(t *testing.T) { - willmsgc := make(chan []byte) - will := []byte{ - 0xDE, - 0xAD, - 0xBE, - 0xEF, - } - - sops := NewClientOptions().AddBroker(FVTTCP) - sops.SetClientID("will-giver") - sops.SetBinaryWill("/wills", will, 0, false) - sops.SetConnectionLostHandler(func(client *Client, err error) { - }) - c := NewClient(sops) - - wops := NewClientOptions().AddBroker(FVTTCP) - wops.SetClientID("will-subscriber") - wops.SetStore(NewFileStore("/tmp/fvt/Binary_Will")) - wops.SetDefaultPublishHandler(func(client *Client, msg Message) { - fmt.Printf("TOPIC: %s\n", msg.Topic()) - fmt.Printf("MSG: %v\n", msg.Payload()) - willmsgc <- msg.Payload() - }) - wsub := NewClient(wops) - - wToken := wsub.Connect() - if wToken.Wait() && wToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", wToken.Error()) - } - - wsub.Subscribe("/wills", 0, nil) - - token := c.Connect() - if token.Wait() && token.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", token.Error()) - } - time.Sleep(time.Duration(1) * time.Second) - - c.forceDisconnect() - - wsub.Disconnect(250) - - if !bytes.Equal(<-willmsgc, will) { - t.Fatalf("will message did not have correct payload") - } -} - -/** -"[...] a publisher is responsible for determining the maximum QoS a -message can be delivered at, but a subscriber is able to downgrade -the QoS to one more suitable for its usage. -The QoS of a message is never upgraded." 
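In other words, the delivered QoS is min(publish QoS, subscribe QoS): in the
p2s1 test below the message is published at QoS 2 but handed to the
subscriber at QoS 1, while in p1s2 it stays at QoS 1 even though the
subscription asked for QoS 2. The nine tests below cover every combination.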
-**/ - -/*********************************** - * Tests to cover the 9 QoS combos * - ***********************************/ - -func wait(c chan bool) { - fmt.Println("choke is waiting") - <-c -} - -// Pub 0, Sub 0 - -func Test_p0s0(t *testing.T) { - store := "/tmp/fvt/p0s0" - topic := "/test/p0s0" - choke := make(chan bool) - - pops := NewClientOptions() - pops.AddBroker(FVTTCP) - pops.SetClientID("p0s0-pub") - pops.SetStore(NewFileStore(store + "/p")) - p := NewClient(pops) - - sops := NewClientOptions() - sops.AddBroker(FVTTCP) - sops.SetClientID("p0s0-sub") - sops.SetStore(NewFileStore(store + "/s")) - var f MessageHandler = func(client *Client, msg Message) { - fmt.Printf("TOPIC: %s\n", msg.Topic()) - fmt.Printf("MSG: %s\n", msg.Payload()) - choke <- true - } - sops.SetDefaultPublishHandler(f) - - s := NewClient(sops) - sToken := s.Connect() - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) - } - - sToken = s.Subscribe(topic, 0, nil) - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on Client.Subscribe(): %v", sToken.Error()) - } - - pToken := p.Connect() - if pToken.Wait() && pToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) - } - p.Publish(topic, 0, false, "p0s0 payload 1") - p.Publish(topic, 0, false, "p0s0 payload 2") - - wait(choke) - wait(choke) - - p.Publish(topic, 0, false, "p0s0 payload 3") - wait(choke) - - p.Disconnect(250) - s.Disconnect(250) - - chkcond(isemptydir(store + "/p")) - chkcond(isemptydir(store + "/s")) -} - -// Pub 0, Sub 1 - -func Test_p0s1(t *testing.T) { - store := "/tmp/fvt/p0s1" - topic := "/test/p0s1" - choke := make(chan bool) - - pops := NewClientOptions() - pops.AddBroker(FVTTCP) - pops.SetClientID("p0s1-pub") - pops.SetStore(NewFileStore(store + "/p")) - p := NewClient(pops) - - sops := NewClientOptions() - sops.AddBroker(FVTTCP) - sops.SetClientID("p0s1-sub") - sops.SetStore(NewFileStore(store + "/s")) - var f MessageHandler = func(client *Client, msg Message) { - fmt.Printf("TOPIC: %s\n", msg.Topic()) - fmt.Printf("MSG: %s\n", msg.Payload()) - choke <- true - } - sops.SetDefaultPublishHandler(f) - - s := NewClient(sops) - sToken := s.Connect() - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) - } - - sToken = s.Subscribe(topic, 1, nil) - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on Client.Subscribe(): %v", sToken.Error()) - } - - pToken := p.Connect() - if pToken.Wait() && pToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) - } - p.Publish(topic, 0, false, "p0s1 payload 1") - p.Publish(topic, 0, false, "p0s1 payload 2") - - wait(choke) - wait(choke) - - p.Publish(topic, 0, false, "p0s1 payload 3") - wait(choke) - - p.Disconnect(250) - s.Disconnect(250) - - chkcond(isemptydir(store + "/p")) - chkcond(isemptydir(store + "/s")) -} - -// Pub 0, Sub 2 - -func Test_p0s2(t *testing.T) { - store := "/tmp/fvt/p0s2" - topic := "/test/p0s2" - choke := make(chan bool) - - pops := NewClientOptions() - pops.AddBroker(FVTTCP) - pops.SetClientID("p0s2-pub") - pops.SetStore(NewFileStore(store + "/p")) - p := NewClient(pops) - - sops := NewClientOptions() - sops.AddBroker(FVTTCP) - sops.SetClientID("p0s2-sub") - sops.SetStore(NewFileStore(store + "/s")) - var f MessageHandler = func(client *Client, msg Message) { - fmt.Printf("TOPIC: %s\n", msg.Topic()) - fmt.Printf("MSG: %s\n", msg.Payload()) - choke <- true - } - sops.SetDefaultPublishHandler(f) - - s := 
NewClient(sops) - sToken := s.Connect() - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) - } - - sToken = s.Subscribe(topic, 2, nil) - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on Client.Subscribe(): %v", sToken.Error()) - } - - pToken := p.Connect() - if pToken.Wait() && pToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) - } - p.Publish(topic, 0, false, "p0s2 payload 1") - p.Publish(topic, 0, false, "p0s2 payload 2") - - wait(choke) - wait(choke) - - p.Publish(topic, 0, false, "p0s2 payload 3") - - wait(choke) - - p.Disconnect(250) - s.Disconnect(250) - - chkcond(isemptydir(store + "/p")) - chkcond(isemptydir(store + "/s")) -} - -// Pub 1, Sub 0 - -func Test_p1s0(t *testing.T) { - store := "/tmp/fvt/p1s0" - topic := "/test/p1s0" - choke := make(chan bool) - - pops := NewClientOptions() - pops.AddBroker(FVTTCP) - pops.SetClientID("p1s0-pub") - pops.SetStore(NewFileStore(store + "/p")) - p := NewClient(pops) - - sops := NewClientOptions() - sops.AddBroker(FVTTCP) - sops.SetClientID("p1s0-sub") - sops.SetStore(NewFileStore(store + "/s")) - var f MessageHandler = func(client *Client, msg Message) { - fmt.Printf("TOPIC: %s\n", msg.Topic()) - fmt.Printf("MSG: %s\n", msg.Payload()) - choke <- true - } - sops.SetDefaultPublishHandler(f) - - s := NewClient(sops) - sToken := s.Connect() - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) - } - - sToken = s.Subscribe(topic, 0, nil) - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on Client.Subscribe(): %v", sToken.Error()) - } - - pToken := p.Connect() - if pToken.Wait() && pToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) - } - p.Publish(topic, 1, false, "p1s0 payload 1") - p.Publish(topic, 1, false, "p1s0 payload 2") - - wait(choke) - wait(choke) - - p.Publish(topic, 1, false, "p1s0 payload 3") - - wait(choke) - - p.Disconnect(250) - s.Disconnect(250) - - chkcond(isemptydir(store + "/p")) - chkcond(isemptydir(store + "/s")) -} - -// Pub 1, Sub 1 - -func Test_p1s1(t *testing.T) { - store := "/tmp/fvt/p1s1" - topic := "/test/p1s1" - choke := make(chan bool) - - pops := NewClientOptions() - pops.AddBroker(FVTTCP) - pops.SetClientID("p1s1-pub") - pops.SetStore(NewFileStore(store + "/p")) - p := NewClient(pops) - - sops := NewClientOptions() - sops.AddBroker(FVTTCP) - sops.SetClientID("p1s1-sub") - sops.SetStore(NewFileStore(store + "/s")) - var f MessageHandler = func(client *Client, msg Message) { - fmt.Printf("TOPIC: %s\n", msg.Topic()) - fmt.Printf("MSG: %s\n", msg.Payload()) - choke <- true - } - sops.SetDefaultPublishHandler(f) - - s := NewClient(sops) - sToken := s.Connect() - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) - } - - sToken = s.Subscribe(topic, 1, nil) - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on Client.Subscribe(): %v", sToken.Error()) - } - - pToken := p.Connect() - if pToken.Wait() && pToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) - } - p.Publish(topic, 1, false, "p1s1 payload 1") - p.Publish(topic, 1, false, "p1s1 payload 2") - - wait(choke) - wait(choke) - - p.Publish(topic, 1, false, "p1s1 payload 3") - wait(choke) - - p.Disconnect(250) - s.Disconnect(250) - - chkcond(isemptydir(store + "/p")) - chkcond(isemptydir(store + "/s")) -} - -// Pub 1, Sub 2 - -func Test_p1s2(t *testing.T) { - store := 
"/tmp/fvt/p1s2" - topic := "/test/p1s2" - choke := make(chan bool) - - pops := NewClientOptions() - pops.AddBroker(FVTTCP) - pops.SetClientID("p1s2-pub") - pops.SetStore(NewFileStore(store + "/p")) - p := NewClient(pops) - - sops := NewClientOptions() - sops.AddBroker(FVTTCP) - sops.SetClientID("p1s2-sub") - sops.SetStore(NewFileStore(store + "/s")) - var f MessageHandler = func(client *Client, msg Message) { - fmt.Printf("TOPIC: %s\n", msg.Topic()) - fmt.Printf("MSG: %s\n", msg.Payload()) - choke <- true - } - sops.SetDefaultPublishHandler(f) - - s := NewClient(sops) - sToken := s.Connect() - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) - } - - sToken = s.Subscribe(topic, 2, nil) - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on Client.Subscribe(): %v", sToken.Error()) - } - - pToken := p.Connect() - if pToken.Wait() && pToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) - } - p.Publish(topic, 1, false, "p1s2 payload 1") - p.Publish(topic, 1, false, "p1s2 payload 2") - - wait(choke) - wait(choke) - - p.Publish(topic, 1, false, "p1s2 payload 3") - - wait(choke) - - p.Disconnect(250) - s.Disconnect(250) - - chkcond(isemptydir(store + "/p")) - chkcond(isemptydir(store + "/s")) -} - -// Pub 2, Sub 0 - -func Test_p2s0(t *testing.T) { - store := "/tmp/fvt/p2s0" - topic := "/test/p2s0" - choke := make(chan bool) - - pops := NewClientOptions() - pops.AddBroker(FVTTCP) - pops.SetClientID("p2s0-pub") - pops.SetStore(NewFileStore(store + "/p")) - p := NewClient(pops) - - sops := NewClientOptions() - sops.AddBroker(FVTTCP) - sops.SetClientID("p2s0-sub") - sops.SetStore(NewFileStore(store + "/s")) - var f MessageHandler = func(client *Client, msg Message) { - fmt.Printf("TOPIC: %s\n", msg.Topic()) - fmt.Printf("MSG: %s\n", msg.Payload()) - choke <- true - } - sops.SetDefaultPublishHandler(f) - - s := NewClient(sops) - sToken := s.Connect() - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) - } - - sToken = s.Subscribe(topic, 0, nil) - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on Client.Subscribe(): %v", sToken.Error()) - } - - pToken := p.Connect() - if pToken.Wait() && pToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) - } - p.Publish(topic, 2, false, "p2s0 payload 1") - p.Publish(topic, 2, false, "p2s0 payload 2") - wait(choke) - wait(choke) - - p.Publish(topic, 2, false, "p2s0 payload 3") - wait(choke) - - p.Disconnect(250) - s.Disconnect(250) - - chkcond(isemptydir(store + "/p")) - chkcond(isemptydir(store + "/s")) -} - -// Pub 2, Sub 1 - -func Test_p2s1(t *testing.T) { - store := "/tmp/fvt/p2s1" - topic := "/test/p2s1" - choke := make(chan bool) - - pops := NewClientOptions() - pops.AddBroker(FVTTCP) - pops.SetClientID("p2s1-pub") - pops.SetStore(NewFileStore(store + "/p")) - p := NewClient(pops) - - sops := NewClientOptions() - sops.AddBroker(FVTTCP) - sops.SetClientID("p2s1-sub") - sops.SetStore(NewFileStore(store + "/s")) - var f MessageHandler = func(client *Client, msg Message) { - fmt.Printf("TOPIC: %s\n", msg.Topic()) - fmt.Printf("MSG: %s\n", msg.Payload()) - choke <- true - } - sops.SetDefaultPublishHandler(f) - - s := NewClient(sops) - sToken := s.Connect() - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) - } - - sToken = s.Subscribe(topic, 1, nil) - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on 
Client.Subscribe(): %v", sToken.Error()) - } - - pToken := p.Connect() - if pToken.Wait() && pToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) - } - p.Publish(topic, 2, false, "p2s1 payload 1") - p.Publish(topic, 2, false, "p2s1 payload 2") - - wait(choke) - wait(choke) - - p.Publish(topic, 2, false, "p2s1 payload 3") - - wait(choke) - - p.Disconnect(250) - s.Disconnect(250) - - chkcond(isemptydir(store + "/p")) - chkcond(isemptydir(store + "/s")) -} - -// Pub 2, Sub 2 - -func Test_p2s2(t *testing.T) { - store := "/tmp/fvt/p2s2" - topic := "/test/p2s2" - choke := make(chan bool) - - pops := NewClientOptions() - pops.AddBroker(FVTTCP) - pops.SetClientID("p2s2-pub") - pops.SetStore(NewFileStore(store + "/p")) - p := NewClient(pops) - - sops := NewClientOptions() - sops.AddBroker(FVTTCP) - sops.SetClientID("p2s2-sub") - sops.SetStore(NewFileStore(store + "/s")) - var f MessageHandler = func(client *Client, msg Message) { - fmt.Printf("TOPIC: %s\n", msg.Topic()) - fmt.Printf("MSG: %s\n", msg.Payload()) - choke <- true - } - sops.SetDefaultPublishHandler(f) - - s := NewClient(sops) - sToken := s.Connect() - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) - } - - sToken = s.Subscribe(topic, 2, nil) - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on Client.Subscribe(): %v", sToken.Error()) - } - - pToken := p.Connect() - if pToken.Wait() && pToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) - } - p.Publish(topic, 2, false, "p2s2 payload 1") - p.Publish(topic, 2, false, "p2s2 payload 2") - - wait(choke) - wait(choke) - - p.Publish(topic, 2, false, "p2s2 payload 3") - - wait(choke) - - p.Disconnect(250) - s.Disconnect(250) - - chkcond(isemptydir(store + "/p")) - chkcond(isemptydir(store + "/s")) -} - -func Test_PublishMessage(t *testing.T) { - store := "/tmp/fvt/PublishMessage" - topic := "/test/pubmsg" - choke := make(chan bool) - - pops := NewClientOptions() - pops.AddBroker(FVTTCP) - pops.SetClientID("pubmsg-pub") - pops.SetStore(NewFileStore(store + "/p")) - p := NewClient(pops) - - sops := NewClientOptions() - sops.AddBroker(FVTTCP) - sops.SetClientID("pubmsg-sub") - sops.SetStore(NewFileStore(store + "/s")) - var f MessageHandler = func(client *Client, msg Message) { - fmt.Printf("TOPIC: %s\n", msg.Topic()) - fmt.Printf("MSG: %s\n", msg.Payload()) - if string(msg.Payload()) != "pubmsg payload" { - fmt.Println("Message payload incorrect", msg.Payload(), len("pubmsg payload")) - t.Fatalf("Message payload incorrect") - } - choke <- true - } - sops.SetDefaultPublishHandler(f) - - s := NewClient(sops) - sToken := s.Connect() - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) - } - - sToken = s.Subscribe(topic, 2, nil) - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on Client.Subscribe(): %v", sToken.Error()) - } - - pToken := p.Connect() - if pToken.Wait() && pToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) - } - - text := "pubmsg payload" - p.Publish(topic, 0, false, text) - p.Publish(topic, 0, false, text) - wait(choke) - wait(choke) - - p.Publish(topic, 0, false, text) - wait(choke) - - p.Disconnect(250) - s.Disconnect(250) - - chkcond(isemptydir(store + "/p")) - chkcond(isemptydir(store + "/s")) -} - -func Test_PublishEmptyMessage(t *testing.T) { - store := "/tmp/fvt/PublishEmptyMessage" - topic := "/test/pubmsgempty" - choke := make(chan bool) - - pops 
:= NewClientOptions() - pops.AddBroker(FVTTCP) - pops.SetClientID("pubmsgempty-pub") - pops.SetStore(NewFileStore(store + "/p")) - p := NewClient(pops) - - sops := NewClientOptions() - sops.AddBroker(FVTTCP) - sops.SetClientID("pubmsgempty-sub") - sops.SetStore(NewFileStore(store + "/s")) - var f MessageHandler = func(client *Client, msg Message) { - fmt.Printf("TOPIC: %s\n", msg.Topic()) - fmt.Printf("MSG: %s\n", msg.Payload()) - if string(msg.Payload()) != "" { - t.Fatalf("Message payload incorrect") - } - choke <- true - } - sops.SetDefaultPublishHandler(f) - - s := NewClient(sops) - sToken := s.Connect() - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) - } - - sToken = s.Subscribe(topic, 2, nil) - if sToken.Wait() && sToken.Error() != nil { - t.Fatalf("Error on Client.Subscribe(): %v", sToken.Error()) - } - - pToken := p.Connect() - if pToken.Wait() && pToken.Error() != nil { - t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) - } - - p.Publish(topic, 0, false, "") - p.Publish(topic, 0, false, "") - wait(choke) - wait(choke) - - p.Publish(topic, 0, false, "") - wait(choke) - - p.Disconnect(250) -} - -// func Test_Cleanstore(t *testing.T) { -// store := "/tmp/fvt/cleanstore" -// topic := "/test/cleanstore" - -// pops := NewClientOptions() -// pops.AddBroker(FVTTCP) -// pops.SetClientID("cleanstore-pub") -// pops.SetStore(NewFileStore(store + "/p")) -// p := NewClient(pops) - -// var s *Client -// sops := NewClientOptions() -// sops.AddBroker(FVTTCP) -// sops.SetClientID("cleanstore-sub") -// sops.SetCleanSession(false) -// sops.SetStore(NewFileStore(store + "/s")) -// var f MessageHandler = func(client *Client, msg Message) { -// fmt.Printf("TOPIC: %s\n", msg.Topic()) -// fmt.Printf("MSG: %s\n", msg.Payload()) -// // Close the connection after receiving -// // the first message so that hopefully -// // there is something in the store to be -// // cleaned. -// s.ForceDisconnect() -// } -// sops.SetDefaultPublishHandler(f) - -// s = NewClient(sops) -// sToken := s.Connect() -// if sToken.Wait() && sToken.Error() != nil { -// t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) -// } - -// sToken = s.Subscribe(topic, 2, nil) -// if sToken.Wait() && sToken.Error() != nil { -// t.Fatalf("Error on Client.Subscribe(): %v", sToken.Error()) -// } - -// pToken := p.Connect() -// if pToken.Wait() && pToken.Error() != nil { -// t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) -// } - -// text := "test message" -// p.Publish(topic, 0, false, text) -// p.Publish(topic, 0, false, text) -// p.Publish(topic, 0, false, text) - -// p.Disconnect(250) - -// s2ops := NewClientOptions() -// s2ops.AddBroker(FVTTCP) -// s2ops.SetClientID("cleanstore-sub") -// s2ops.SetCleanSession(true) -// s2ops.SetStore(NewFileStore(store + "/s")) -// s2ops.SetDefaultPublishHandler(f) - -// s2 := NewClient(s2ops) -// sToken = s2.Connect() -// if sToken.Wait() && sToken.Error() != nil { -// t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) -// } - -// // at this point existing state should be cleared... -// // how to check? 
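// (One way to check, consistent with the other tests in this file: open a
// fresh FileStore on store + "/s" and assert the directory is empty again,
// e.g. chkcond(isemptydir(store + "/s")).)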
-// }
-
-func Test_MultipleURLs(t *testing.T) {
-	ops := NewClientOptions()
-	ops.AddBroker("tcp://127.0.0.1:10000")
-	ops.AddBroker(FVTTCP)
-	ops.SetClientID("MultiURL")
-	ops.SetStore(NewFileStore("/tmp/fvt/MultiURL"))
-
-	c := NewClient(ops)
-	token := c.Connect()
-	if token.Wait() && token.Error() != nil {
-		t.Fatalf("Error on Client.Connect(): %v", token.Error())
-	}
-
-	token = c.Publish("/test/MultiURL", 0, false, "Publish qos0")
-	token.Wait()
-
-	c.Disconnect(250)
-}
-
-/*
-// A test to make sure the ping mechanism is working
-// This test can be left commented out because it's annoying to wait for
-func Test_ping3_idle10(t *testing.T) {
-	ops := NewClientOptions()
-	ops.AddBroker(FVTTCP)
-	//ops.AddBroker("tcp://test.mosquitto.org:1883")
-	ops.SetClientID("p3i10")
-	ops.SetKeepAlive(4)
-
-	c := NewClient(ops)
-	token := c.Connect()
-	if token.Wait() && token.Error() != nil {
-		t.Fatalf("Error on Client.Connect(): %v", token.Error())
-	}
-	time.Sleep(time.Duration(10) * time.Second)
-	c.Disconnect(250)
-}
-*/
diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt_store_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt_store_test.go
deleted file mode 100644
index d74490b54..000000000
--- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt_store_test.go
+++ /dev/null
@@ -1,496 +0,0 @@
-/*
- * Copyright (c) 2013 IBM Corp.
- *
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * which accompanies this distribution, and is available at
- * http://www.eclipse.org/legal/epl-v10.html
- *
- * Contributors:
- *    Seth Hoenig
- *    Allan Stockdill-Mander
- *    Mike Robertson
- */
-
-package mqtt
-
-import (
-	"bytes"
-	"fmt"
-	"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
-	"testing"
-)
-
-/*******************************
- **** Some helper functions ****
- *******************************/
-
-func b2s(bs []byte) string {
-	s := ""
-	for _, b := range bs {
-		s += fmt.Sprintf("%x ", b)
-	}
-	return s
-}
-
-/**********************************************
- **** A mock store implementation for test ****
- **********************************************/
-
-type TestStore struct {
-	mput []uint16
-	mget []uint16
-	mdel []uint16
-}
-
-func (ts *TestStore) Open() {
-}
-
-func (ts *TestStore) Close() {
-}
-
-func (ts *TestStore) Put(key string, m packets.ControlPacket) {
-	ts.mput = append(ts.mput, m.Details().MessageID)
-}
-
-func (ts *TestStore) Get(key string) packets.ControlPacket {
-	mid := mIDFromKey(key)
-	ts.mget = append(ts.mget, mid)
-	return nil
-}
-
-func (ts *TestStore) All() []string {
-	return nil
-}
-
-func (ts *TestStore) Del(key string) {
-	mid := mIDFromKey(key)
-	ts.mdel = append(ts.mdel, mid)
-}
-
-func (ts *TestStore) Reset() {
-}
-
-/*******************
- **** FileStore ****
- *******************/
-
-func Test_NewFileStore(t *testing.T) {
-	storedir := "/tmp/TestStore/_new"
-	f := NewFileStore(storedir)
-	if f.opened {
-		t.Fatalf("filestore was opened without opening it")
-	}
-	if f.directory != storedir {
-		t.Fatalf("filestore directory is wrong")
-	}
-	// storedir might exist or might not, just like with a real client
-	// the point is, we don't care, we just want it to exist after it is
-	// opened
-}
-
-func Test_FileStore_Open(t *testing.T) {
-	storedir := "/tmp/TestStore/_open"
-
-	f := NewFileStore(storedir)
-	f.Open()
-	if !f.opened {
-		t.Fatalf("filestore was not set open")
-	}
-	if f.directory != storedir {
-		t.Fatalf("filestore directory is wrong")
-	}
-	if !exists(storedir) {
-		t.Fatalf("filestore directory does not exist after opening it")
-	}
-}
-
-func Test_FileStore_Close(t *testing.T) {
-	storedir := "/tmp/TestStore/_unopen"
-	f := NewFileStore(storedir)
-	f.Open()
-	if !f.opened {
-		t.Fatalf("filestore was not set open")
-	}
-	if f.directory != storedir {
-		t.Fatalf("filestore directory is wrong")
-	}
-	if !exists(storedir) {
-		t.Fatalf("filestore directory does not exist after opening it")
-	}
-
-	f.Close()
-	if f.opened {
-		t.Fatalf("filestore was still open after unopen")
-	}
-	if !exists(storedir) {
-		t.Fatalf("filestore was deleted after unopen")
-	}
-}
-
-func Test_FileStore_write(t *testing.T) {
-	storedir := "/tmp/TestStore/_write"
-	f := NewFileStore(storedir)
-	f.Open()
-
-	pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
-	pm.Qos = 1
-	pm.TopicName = "a/b/c"
-	pm.Payload = []byte{0xBE, 0xEF, 0xED}
-	pm.MessageID = 91
-
-	key := inboundKeyFromMID(pm.MessageID)
-	f.Put(key, pm)
-
-	if !exists(storedir + "/i.91.msg") {
-		t.Fatalf("message not in store")
-	}
-}
-
-func Test_FileStore_Get(t *testing.T) {
-	storedir := "/tmp/TestStore/_get"
-	f := NewFileStore(storedir)
-	f.Open()
-	pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
-	pm.Qos = 1
-	pm.TopicName = "/a/b/c"
-	pm.Payload = []byte{0xBE, 0xEF, 0xED}
-	pm.MessageID = 120
-
-	key := outboundKeyFromMID(pm.MessageID)
-	f.Put(key, pm)
-
-	if !exists(storedir + "/o.120.msg") {
-		t.Fatalf("message not in store")
-	}
-
-	exp := []byte{
-		/* msg type */
-		0x32, // qos 1
-
-		/* remlen */
-		0x0d,
-
-		/* topic, msg id in varheader */
-		0x00, // length of topic
-		0x06,
-		0x2F, // /
-		0x61, // a
-		0x2F, // /
-		0x62, // b
-		0x2F, // /
-		0x63, // c
-
-		/* msg id (is always 2 bytes) */
-		0x00,
-		0x78,
-
-		/* payload */
-		0xBE,
-		0xEF,
-		0xED,
-	}
-
-	m := f.Get(key)
-
-	if m == nil {
-		t.Fatalf("message not retrieved from store")
-	}
-
-	var msg bytes.Buffer
-	m.Write(&msg)
-	if !bytes.Equal(exp, msg.Bytes()) {
-		t.Fatal("message from store not same as what went in", msg.Bytes())
-	}
-}
-
-func Test_FileStore_All(t *testing.T) {
-	storedir := "/tmp/TestStore/_all"
-	f := NewFileStore(storedir)
-	f.Open()
-	pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
-	pm.Qos = 2
-	pm.TopicName = "/t/r/v"
-	pm.Payload = []byte{0x01, 0x02}
-	pm.MessageID = 121
-
-	key := outboundKeyFromMID(pm.MessageID)
-	f.Put(key, pm)
-
-	keys := f.All()
-	if len(keys) != 1 {
-		t.Fatalf("FileStore.All does not have the messages")
-	}
-
-	if keys[0] != "o.121" {
-		t.Fatalf("FileStore.All has wrong key")
-	}
-}
-
-func Test_FileStore_Del(t *testing.T) {
-	storedir := "/tmp/TestStore/_del"
-	f := NewFileStore(storedir)
-	f.Open()
-
-	pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
-	pm.Qos = 1
-	pm.TopicName = "a/b/c"
-	pm.Payload = []byte{0xBE, 0xEF, 0xED}
-	pm.MessageID = 17
-
-	key := inboundKeyFromMID(pm.MessageID)
-	f.Put(key, pm)
-
-	if !exists(storedir + "/i.17.msg") {
-		t.Fatalf("message not in store")
-	}
-
-	f.Del(key)
-
-	if exists(storedir + "/i.17.msg") {
-		t.Fatalf("message still exists after deletion")
-	}
-}
-
-func Test_FileStore_Reset(t *testing.T) {
-	storedir := "/tmp/TestStore/_reset"
-	f := NewFileStore(storedir)
-	f.Open()
-
-	pm1 := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
-	pm1.Qos = 1
-	pm1.TopicName = "/q/w/e"
-	pm1.Payload = []byte{0xBB}
pm1.MessageID = 71 - key1 := inboundKeyFromMID(pm1.MessageID) - f.Put(key1, pm1) - - pm2 := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) - pm2.Qos = 1 - pm2.TopicName = "/q/w/e" - pm2.Payload = []byte{0xBB} - pm2.MessageID = 72 - key2 := inboundKeyFromMID(pm2.MessageID) - f.Put(key2, pm2) - - pm3 := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) - pm3.Qos = 1 - pm3.TopicName = "/q/w/e" - pm3.Payload = []byte{0xBB} - pm3.MessageID = 73 - key3 := inboundKeyFromMID(pm3.MessageID) - f.Put(key3, pm3) - - pm4 := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) - pm4.Qos = 1 - pm4.TopicName = "/q/w/e" - pm4.Payload = []byte{0xBB} - pm4.MessageID = 74 - key4 := inboundKeyFromMID(pm4.MessageID) - f.Put(key4, pm4) - - pm5 := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) - pm5.Qos = 1 - pm5.TopicName = "/q/w/e" - pm5.Payload = []byte{0xBB} - pm5.MessageID = 75 - key5 := inboundKeyFromMID(pm5.MessageID) - f.Put(key5, pm5) - - if !exists(storedir + "/i.71.msg") { - t.Fatalf("message not in store") - } - - if !exists(storedir + "/i.72.msg") { - t.Fatalf("message not in store") - } - - if !exists(storedir + "/i.73.msg") { - t.Fatalf("message not in store") - } - - if !exists(storedir + "/i.74.msg") { - t.Fatalf("message not in store") - } - - if !exists(storedir + "/i.75.msg") { - t.Fatalf("message not in store") - } - - f.Reset() - - if exists(storedir + "/i.71.msg") { - t.Fatalf("message still exists after reset") - } - - if exists(storedir + "/i.72.msg") { - t.Fatalf("message still exists after reset") - } - - if exists(storedir + "/i.73.msg") { - t.Fatalf("message still exists after reset") - } - - if exists(storedir + "/i.74.msg") { - t.Fatalf("message still exists after reset") - } - - if exists(storedir + "/i.75.msg") { - t.Fatalf("message still exists after reset") - } -} - -/******************* - *** MemoryStore *** - *******************/ - -func Test_NewMemoryStore(t *testing.T) { - m := NewMemoryStore() - if m == nil { - t.Fatalf("MemoryStore could not be created") - } -} - -func Test_MemoryStore_Open(t *testing.T) { - m := NewMemoryStore() - m.Open() - if !m.opened { - t.Fatalf("MemoryStore was not set open") - } -} - -func Test_MemoryStore_Close(t *testing.T) { - m := NewMemoryStore() - m.Open() - if !m.opened { - t.Fatalf("MemoryStore was not set open") - } - - m.Close() - if m.opened { - t.Fatalf("MemoryStore was still open after unopen") - } -} - -func Test_MemoryStore_Reset(t *testing.T) { - m := NewMemoryStore() - m.Open() - - pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) - pm.Qos = 2 - pm.TopicName = "/f/r/s" - pm.Payload = []byte{0xAB} - pm.MessageID = 81 - - key := outboundKeyFromMID(pm.MessageID) - m.Put(key, pm) - - if len(m.messages) != 1 { - t.Fatalf("message not in memstore") - } - - m.Reset() - - if len(m.messages) != 0 { - t.Fatalf("reset did not clear memstore") - } -} - -func Test_MemoryStore_write(t *testing.T) { - m := NewMemoryStore() - m.Open() - - pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) - pm.Qos = 1 - pm.TopicName = "/a/b/c" - pm.Payload = []byte{0xBE, 0xEF, 0xED} - pm.MessageID = 91 - key := inboundKeyFromMID(pm.MessageID) - m.Put(key, pm) - - if len(m.messages) != 1 { - t.Fatalf("message not in store") - } -} - -func Test_MemoryStore_Get(t *testing.T) { - m := NewMemoryStore() - m.Open() - pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) - pm.Qos = 1 - pm.TopicName = "/a/b/c" - pm.Payload = []byte{0xBE, 
0xEF, 0xED} - pm.MessageID = 120 - - key := outboundKeyFromMID(pm.MessageID) - m.Put(key, pm) - - if len(m.messages) != 1 { - t.Fatalf("message not in store") - } - - exp := []byte{ - /* msg type */ - 0x32, // qos 1 - - /* remlen */ - 0x0d, - - /* topic, msg id in varheader */ - 0x00, // length of topic - 0x06, - 0x2F, // / - 0x61, // a - 0x2F, // / - 0x62, // b - 0x2F, // / - 0x63, // c - - /* msg id (is always 2 bytes) */ - 0x00, - 0x78, - - /* payload */ - 0xBE, - 0xEF, - 0xED, - } - - msg := m.Get(key) - - if msg == nil { - t.Fatalf("message not retrieved from store") - } - - var buf bytes.Buffer - msg.Write(&buf) - if !bytes.Equal(exp, buf.Bytes()) { - t.Fatalf("message from store not same as what went in") - } -} - -func Test_MemoryStore_Del(t *testing.T) { - m := NewMemoryStore() - m.Open() - - pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) - pm.Qos = 1 - pm.TopicName = "/a/b/c" - pm.Payload = []byte{0xBE, 0xEF, 0xED} - pm.MessageID = 17 - - key := outboundKeyFromMID(pm.MessageID) - - m.Put(key, pm) - - if len(m.messages) != 1 { - t.Fatalf("message not in store") - } - - m.Del(key) - - if len(m.messages) != 1 { - t.Fatalf("message still exists after deletion") - } -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt_test.go deleted file mode 100644 index 6afd9be08..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt_test.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -// Use setup_IMA.sh for IBM's MessageSight -// Use fvt/rsmb.cfg for IBM's Really Small Message Broker -// Use fvt/mosquitto.cfg for the open source Mosquitto project - -// Set these values to the URI of your MQTT Broker before running go test -const ( - FVTAddr = "iot.eclipse.org" - FVTTCP = "tcp://" + FVTAddr + ":1883" - FVTSSL = "ssl://" + FVTAddr + ":8883" -) diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/memstore.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/memstore.go deleted file mode 100644 index 690a2cca5..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/memstore.go +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets" - "sync" -) - -// MemoryStore implements the store interface to provide a "persistence" -// mechanism wholly stored in memory. This is only useful for -// as long as the client instance exists.
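An editorial aside on the two Get tests above: the expected byte slices are simply the MQTT 3.1 PUBLISH wire format written out by hand. A minimal standalone sketch of where 0x32 and 0x0d come from (the package layout and names here are illustrative, not part of the vendored code):

```go
package main

import "fmt"

func main() {
	// Byte 1 of the MQTT fixed header: packet type in the high
	// nibble, DUP/QoS/RETAIN flags in the low nibble.
	const publishType = 3 // PUBLISH
	var dup, qos, retain byte = 0, 1, 0

	first := byte(publishType)<<4 | dup<<3 | qos<<1 | retain
	fmt.Printf("first byte: 0x%02X\n", first) // 0x32, as in the tests

	// Remaining length: 2-byte topic length + topic + 2-byte
	// message ID + payload = 2 + 6 + 2 + 3 = 13.
	remaining := 2 + len("/a/b/c") + 2 + 3
	fmt.Printf("remaining length: 0x%02X\n", remaining) // 0x0d
}
```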
-type MemoryStore struct { - sync.RWMutex - messages map[string]packets.ControlPacket - opened bool -} - -// NewMemoryStore returns a pointer to a new instance of -// MemoryStore, the instance is not initialized and ready to -// use until Open() has been called on it. -func NewMemoryStore() *MemoryStore { - store := &MemoryStore{ - messages: make(map[string]packets.ControlPacket), - opened: false, - } - return store -} - -// Open initializes a MemoryStore instance. -func (store *MemoryStore) Open() { - store.Lock() - defer store.Unlock() - store.opened = true - DEBUG.Println(STR, "memorystore initialized") -} - -// Put takes a key and a pointer to a Message and stores the -// message. -func (store *MemoryStore) Put(key string, message packets.ControlPacket) { - store.Lock() - defer store.Unlock() - chkcond(store.opened) - store.messages[key] = message -} - -// Get takes a key and looks in the store for a matching Message -// returning either the Message pointer or nil. -func (store *MemoryStore) Get(key string) packets.ControlPacket { - store.RLock() - defer store.RUnlock() - chkcond(store.opened) - mid := mIDFromKey(key) - m := store.messages[key] - if m == nil { - CRITICAL.Println(STR, "memorystore get: message", mid, "not found") - } else { - DEBUG.Println(STR, "memorystore get: message", mid, "found") - } - return m -} - -// All returns a slice of strings containing all the keys currently -// in the MemoryStore. -func (store *MemoryStore) All() []string { - store.RLock() - defer store.RUnlock() - chkcond(store.opened) - keys := []string{} - for k := range store.messages { - keys = append(keys, k) - } - return keys -} - -// Del takes a key, searches the MemoryStore and if the key is found -// deletes the Message pointer associated with it. -func (store *MemoryStore) Del(key string) { - store.Lock() - defer store.Unlock() - mid := mIDFromKey(key) - m := store.messages[key] - if m == nil { - WARN.Println(STR, "memorystore del: message", mid, "not found") - } else { - store.messages[key] = nil - DEBUG.Println(STR, "memorystore del: message", mid, "was deleted") - } -} - -// Close will disallow modifications to the state of the store. -func (store *MemoryStore) Close() { - store.Lock() - defer store.Unlock() - chkcond(store.opened) - store.opened = false - DEBUG.Println(STR, "memorystore closed") -} - -// Reset eliminates all persisted message data in the store. -func (store *MemoryStore) Reset() { - store.Lock() - defer store.Unlock() - chkcond(store.opened) - store.messages = make(map[string]packets.ControlPacket) - WARN.Println(STR, "memorystore wiped") -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/message.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/message.go deleted file mode 100644 index 0f53a1701..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/message.go +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. 
This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets" -) - -// Message defines the externals that a message implementation must support -// these are received messages that are passed to the callbacks, not internal -// messages -type Message interface { - Duplicate() bool - Qos() byte - Retained() bool - Topic() string - MessageID() uint16 - Payload() []byte -} - -type message struct { - duplicate bool - qos byte - retained bool - topic string - messageID uint16 - payload []byte -} - -func (m *message) Duplicate() bool { - return m.duplicate -} - -func (m *message) Qos() byte { - return m.qos -} - -func (m *message) Retained() bool { - return m.retained -} - -func (m *message) Topic() string { - return m.topic -} - -func (m *message) MessageID() uint16 { - return m.messageID -} - -func (m *message) Payload() []byte { - return m.payload -} - -func messageFromPublish(p *packets.PublishPacket) Message { - return &message{ - duplicate: p.Dup, - qos: p.Qos, - retained: p.Retain, - topic: p.TopicName, - messageID: p.MessageID, - payload: p.Payload, - } -} - -func newConnectMsgFromOptions(options *ClientOptions) *packets.ConnectPacket { - m := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket) - - m.CleanSession = options.CleanSession - m.WillFlag = options.WillEnabled - m.WillRetain = options.WillRetained - m.ClientIdentifier = options.ClientID - - if options.WillEnabled { - m.WillQos = options.WillQos - m.WillTopic = options.WillTopic - m.WillMessage = options.WillPayload - } - - if options.Username != "" { - m.UsernameFlag = true - m.Username = options.Username - //mustn't have password without user as well - if options.Password != "" { - m.PasswordFlag = true - m.Password = []byte(options.Password) - } - } - - m.KeepaliveTimer = uint16(options.KeepAlive) - - return m -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/messageids.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/messageids.go deleted file mode 100644 index a6fc3ae4b..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/messageids.go +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "sync" -) - -// MId is 16 bit message id as specified by the MQTT spec. -// In general, these values should not be depended upon by -// the client application. 
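Looking back at message.go above: a subscription callback interacts with an incoming publish only through the Message interface. A hypothetical handler, for illustration only (it assumes the surrounding mqtt package plus "fmt"; the function name is mine, and the signature matches the MessageHandler type defined later in options.go):

```go
// Hypothetical callback: everything it can learn about the publish
// comes from the six read-only accessors of the Message interface.
func logMessage(client *Client, msg Message) {
	fmt.Printf("topic=%s qos=%d id=%d dup=%t retained=%t payload=%q\n",
		msg.Topic(), msg.Qos(), msg.MessageID(),
		msg.Duplicate(), msg.Retained(), msg.Payload())
}
```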
-type MId uint16 - -type messageIds struct { - sync.RWMutex - index map[uint16]Token -} - -const ( - midMin uint16 = 1 - midMax uint16 = 65535 -) - -func (mids *messageIds) freeID(id uint16) { - mids.Lock() - defer mids.Unlock() - delete(mids.index, id) -} - -func (mids *messageIds) getID(t Token) uint16 { - mids.Lock() - defer mids.Unlock() - for i := midMin; i < midMax; i++ { - if _, ok := mids.index[i]; !ok { - mids.index[i] = t - return i - } - } - return 0 -} - -func (mids *messageIds) getToken(id uint16) Token { - mids.RLock() - defer mids.RUnlock() - if token, ok := mids.index[id]; ok { - return token - } - return nil -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/net.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/net.go deleted file mode 100644 index daee86820..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/net.go +++ /dev/null @@ -1,275 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "crypto/tls" - "errors" - "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets" - "golang.org/x/net/websocket" - "net" - "net/url" - "reflect" - "time" -) - -func openConnection(uri *url.URL, tlsc *tls.Config, timeout time.Duration) (net.Conn, error) { - switch uri.Scheme { - case "ws": - conn, err := websocket.Dial(uri.String(), "mqtt", "ws://localhost") - if err != nil { - return nil, err - } - conn.PayloadType = websocket.BinaryFrame - return conn, err - case "wss": - config, _ := websocket.NewConfig(uri.String(), "ws://localhost") - config.Protocol = []string{"mqtt"} - config.TlsConfig = tlsc - conn, err := websocket.DialConfig(config) - if err != nil { - return nil, err - } - conn.PayloadType = websocket.BinaryFrame - return conn, err - case "tcp": - conn, err := net.DialTimeout("tcp", uri.Host, timeout) - if err != nil { - return nil, err - } - return conn, nil - case "ssl": - fallthrough - case "tls": - fallthrough - case "tcps": - conn, err := tls.DialWithDialer(&net.Dialer{Timeout: timeout}, "tcp", uri.Host, tlsc) - if err != nil { - return nil, err - } - return conn, nil - } - return nil, errors.New("Unknown protocol") -} - -// actually read incoming messages off the wire -// send Message object into ibound channel -func incoming(c *Client) { - defer c.workers.Done() - var err error - var cp packets.ControlPacket - - DEBUG.Println(NET, "incoming started") - - for { - if cp, err = packets.ReadPacket(c.conn); err != nil { - break - } - DEBUG.Println(NET, "Received Message") - c.ibound <- cp - } - // We received an error on read. 
- // If disconnect is in progress, swallow error and return - select { - case <-c.stop: - DEBUG.Println(NET, "incoming stopped") - return - // Not trying to disconnect, send the error to the errors channel - default: - ERROR.Println(NET, "incoming stopped with error") - c.errors <- err - return - } -} - -// receive a Message object on obound, and then -// actually send outgoing message to the wire -func outgoing(c *Client) { - defer c.workers.Done() - DEBUG.Println(NET, "outgoing started") - - for { - DEBUG.Println(NET, "outgoing waiting for an outbound message") - select { - case <-c.stop: - DEBUG.Println(NET, "outgoing stopped") - return - case pub := <-c.obound: - msg := pub.p.(*packets.PublishPacket) - if msg.Qos != 0 && msg.MessageID == 0 { - msg.MessageID = c.getID(pub.t) - pub.t.(*PublishToken).messageID = msg.MessageID - } - //persist_obound(c.persist, msg) - - if c.options.WriteTimeout > 0 { - c.conn.SetWriteDeadline(time.Now().Add(c.options.WriteTimeout)) - } - - if err := msg.Write(c.conn); err != nil { - ERROR.Println(NET, "outgoing stopped with error") - c.errors <- err - return - } - - if c.options.WriteTimeout > 0 { - // If we successfully wrote, we don't want the timeout to happen during an idle period - // so we reset it to infinite. - c.conn.SetWriteDeadline(time.Time{}) - } - - if msg.Qos == 0 { - pub.t.flowComplete() - } - - c.lastContact.update() - DEBUG.Println(NET, "obound wrote msg, id:", msg.MessageID) - case msg := <-c.oboundP: - switch msg.p.(type) { - case *packets.SubscribePacket: - msg.p.(*packets.SubscribePacket).MessageID = c.getID(msg.t) - case *packets.UnsubscribePacket: - msg.p.(*packets.UnsubscribePacket).MessageID = c.getID(msg.t) - } - DEBUG.Println(NET, "obound priority msg to write, type", reflect.TypeOf(msg.p)) - if err := msg.p.Write(c.conn); err != nil { - ERROR.Println(NET, "outgoing stopped with error") - c.errors <- err - return - } - c.lastContact.update() - switch msg.p.(type) { - case *packets.DisconnectPacket: - msg.t.(*DisconnectToken).flowComplete() - DEBUG.Println(NET, "outbound wrote disconnect, stopping") - return - } - } - } -} - -// receive Message objects on ibound -// store messages if necessary -// send replies on obound -// delete messages from store if necessary -func alllogic(c *Client) { - - DEBUG.Println(NET, "logic started") - - for { - DEBUG.Println(NET, "logic waiting for msg on ibound") - - select { - case msg := <-c.ibound: - DEBUG.Println(NET, "logic got msg on ibound") - //persist_ibound(c.persist, msg) - switch msg.(type) { - case *packets.PingrespPacket: - DEBUG.Println(NET, "received pingresp") - c.pingOutstanding = false - case *packets.SubackPacket: - sa := msg.(*packets.SubackPacket) - DEBUG.Println(NET, "received suback, id:", sa.MessageID) - token := c.getToken(sa.MessageID).(*SubscribeToken) - DEBUG.Println(NET, "granted qoss", sa.GrantedQoss) - for i, qos := range sa.GrantedQoss { - token.subResult[token.subs[i]] = qos - } - token.flowComplete() - go c.freeID(sa.MessageID) - case *packets.UnsubackPacket: - ua := msg.(*packets.UnsubackPacket) - DEBUG.Println(NET, "received unsuback, id:", ua.MessageID) - token := c.getToken(ua.MessageID).(*UnsubscribeToken) - token.flowComplete() - go c.freeID(ua.MessageID) - case *packets.PublishPacket: - pp := msg.(*packets.PublishPacket) - DEBUG.Println(NET, "received publish, msgId:", pp.MessageID) - DEBUG.Println(NET, "putting msg on onPubChan") - switch pp.Qos { - case 2: - c.incomingPubChan <- pp - DEBUG.Println(NET, "done putting msg on incomingPubChan") - pr := 
packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket) - pr.MessageID = pp.MessageID - DEBUG.Println(NET, "putting pubrec msg on obound") - c.oboundP <- &PacketAndToken{p: pr, t: nil} - DEBUG.Println(NET, "done putting pubrec msg on obound") - case 1: - c.incomingPubChan <- pp - DEBUG.Println(NET, "done putting msg on incomingPubChan") - pa := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket) - pa.MessageID = pp.MessageID - DEBUG.Println(NET, "putting puback msg on obound") - c.oboundP <- &PacketAndToken{p: pa, t: nil} - DEBUG.Println(NET, "done putting puback msg on obound") - case 0: - select { - case c.incomingPubChan <- pp: - DEBUG.Println(NET, "done putting msg on incomingPubChan") - case err, ok := <-c.errors: - DEBUG.Println(NET, "error while putting msg on pubChanZero") - // We are unblocked, but need to put the error back on so the outer - // select can handle it appropriately. - if ok { - go func(errVal error, errChan chan error) { - errChan <- errVal - }(err, c.errors) - } - } - } - case *packets.PubackPacket: - pa := msg.(*packets.PubackPacket) - DEBUG.Println(NET, "received puback, id:", pa.MessageID) - // c.receipts.get(msg.MsgId()) <- Receipt{} - // c.receipts.end(msg.MsgId()) - c.getToken(pa.MessageID).flowComplete() - c.freeID(pa.MessageID) - case *packets.PubrecPacket: - prec := msg.(*packets.PubrecPacket) - DEBUG.Println(NET, "received pubrec, id:", prec.MessageID) - prel := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket) - prel.MessageID = prec.MessageID - select { - case c.oboundP <- &PacketAndToken{p: prel, t: nil}: - case <-time.After(time.Second): - } - case *packets.PubrelPacket: - pr := msg.(*packets.PubrelPacket) - DEBUG.Println(NET, "received pubrel, id:", pr.MessageID) - pc := packets.NewControlPacket(packets.Pubcomp).(*packets.PubcompPacket) - pc.MessageID = pr.MessageID - select { - case c.oboundP <- &PacketAndToken{p: pc, t: nil}: - case <-time.After(time.Second): - } - case *packets.PubcompPacket: - pc := msg.(*packets.PubcompPacket) - DEBUG.Println(NET, "received pubcomp, id:", pc.MessageID) - c.getToken(pc.MessageID).flowComplete() - c.freeID(pc.MessageID) - } - case <-c.stop: - WARN.Println(NET, "logic stopped") - return - case err := <-c.errors: - ERROR.Println(NET, "logic got error") - c.internalConnLost(err) - return - } - c.lastContact.update() - } -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/net_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/net_test.go deleted file mode 100644 index 9598bd6b3..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/net_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package mqtt - -import ( - "errors" - "fmt" - "strconv" - "testing" -) - -func Test_openConnection(t *testing.T) { - _, err := strconv.Atoi("") - e := fmt.Errorf(" : %s", err) - t.Errorf("%#v", e) - - e1 := errors.New("hogehoge %s") - t.Errorf("%#v", e1) -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/notice.html b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/notice.html deleted file mode 100644 index f19c483b9..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/notice.html +++ /dev/null @@ -1,108 +0,0 @@ - - - - - -Eclipse Foundation Software User Agreement - - - -

Eclipse Foundation Software User Agreement

February 1, 2011

Usage Of Content

THE ECLIPSE FOUNDATION MAKES AVAILABLE SOFTWARE, DOCUMENTATION, INFORMATION AND/OR OTHER MATERIALS FOR OPEN SOURCE PROJECTS (COLLECTIVELY "CONTENT"). USE OF THE CONTENT IS GOVERNED BY THE TERMS AND CONDITIONS OF THIS AGREEMENT AND/OR THE TERMS AND CONDITIONS OF LICENSE AGREEMENTS OR NOTICES INDICATED OR REFERENCED BELOW. BY USING THE CONTENT, YOU AGREE THAT YOUR USE OF THE CONTENT IS GOVERNED BY THIS AGREEMENT AND/OR THE TERMS AND CONDITIONS OF ANY APPLICABLE LICENSE AGREEMENTS OR NOTICES INDICATED OR REFERENCED BELOW. IF YOU DO NOT AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT AND THE TERMS AND CONDITIONS OF ANY APPLICABLE LICENSE AGREEMENTS OR NOTICES INDICATED OR REFERENCED BELOW, THEN YOU MAY NOT USE THE CONTENT.

Applicable Licenses

Unless otherwise indicated, all Content made available by the Eclipse Foundation is provided to you under the terms and conditions of the Eclipse Public License Version 1.0 ("EPL"). A copy of the EPL is provided with this Content and is also available at http://www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program" will mean the Content.

Content includes, but is not limited to, source code, object code, documentation and other files maintained in the Eclipse Foundation source code repository ("Repository") in software modules ("Modules") and made available as downloadable archives ("Downloads").

- Content may be structured and packaged into modules to facilitate delivering, extending, and upgrading the Content. Typical modules may include plug-ins ("Plug-ins"), plug-in fragments ("Fragments"), and features ("Features").
- Each Plug-in or Fragment may be packaged as a sub-directory or JAR (Java ARchive) in a directory named "plugins".
- A Feature is a bundle of one or more Plug-ins and/or Fragments and associated material. Each Feature may be packaged as a sub-directory in a directory named "features". Within a Feature, files named "feature.xml" may contain a list of the names and version numbers of the Plug-ins and/or Fragments associated with that Feature.
- Features may also include other Features ("Included Features"). Within a Feature, files named "feature.xml" may contain a list of the names and version numbers of Included Features.

The terms and conditions governing Plug-ins and Fragments should be contained in files named "about.html" ("Abouts"). The terms and conditions governing Features and Included Features should be contained in files named "license.html" ("Feature Licenses"). Abouts and Feature Licenses may be located in any directory of a Download or Module including, but not limited to the following locations:

- The top-level (root) directory
- Plug-in and Fragment directories
- Inside Plug-ins and Fragments packaged as JARs
- Sub-directories of the directory named "src" of certain Plug-ins
- Feature directories

Note: if a Feature made available by the Eclipse Foundation is installed using the Provisioning Technology (as defined below), you must agree to a license ("Feature Update License") during the installation process. If the Feature contains Included Features, the Feature Update License should either provide you with the terms and conditions governing the Included Features or inform you where you can locate them. Feature Update Licenses may be found in the "license" property of files named "feature.properties" found within a Feature. Such Abouts, Feature Licenses, and Feature Update Licenses contain the terms and conditions (or references to such terms and conditions) that govern your use of the associated Content in that directory.

THE ABOUTS, FEATURE LICENSES, AND FEATURE UPDATE LICENSES MAY REFER TO THE EPL OR OTHER LICENSE AGREEMENTS, NOTICES OR TERMS AND CONDITIONS. SOME OF THESE OTHER LICENSE AGREEMENTS MAY INCLUDE (BUT ARE NOT LIMITED TO):

IT IS YOUR OBLIGATION TO READ AND ACCEPT ALL SUCH TERMS AND CONDITIONS PRIOR TO USE OF THE CONTENT. If no About, Feature License, or Feature Update License is provided, please contact the Eclipse Foundation to determine what terms and conditions govern that particular Content.

Use of Provisioning Technology

The Eclipse Foundation makes available provisioning software, examples of which include, but are not limited to, p2 and the Eclipse Update Manager ("Provisioning Technology") for the purpose of allowing users to install software, documentation, information and/or other materials (collectively "Installable Software"). This capability is provided with the intent of allowing such users to install, extend and update Eclipse-based products. Information about packaging Installable Software is available at http://eclipse.org/equinox/p2/repository_packaging.html ("Specification").

You may use Provisioning Technology to allow other parties to install Installable Software. You shall be responsible for enabling the applicable license agreements relating to the Installable Software to be presented to, and accepted by, the users of the Provisioning Technology in accordance with the Specification. By using Provisioning Technology in such a manner and making it available in accordance with the Specification, you further acknowledge your agreement to, and the acquisition of all necessary rights to permit the following:

1. A series of actions may occur ("Provisioning Process") in which a user may execute the Provisioning Technology on a machine ("Target Machine") with the intent of installing, extending or updating the functionality of an Eclipse-based product.
2. During the Provisioning Process, the Provisioning Technology may cause third party Installable Software or a portion thereof to be accessed and copied to the Target Machine.
3. Pursuant to the Specification, you will provide to the user the terms and conditions that govern the use of the Installable Software ("Installable Software Agreement") and such Installable Software Agreement shall be accessed from the Target Machine in accordance with the Specification. Such Installable Software Agreement must inform the user of the terms and conditions that govern the Installable Software and must solicit acceptance by the end user in the manner prescribed in such Installable Software Agreement. Upon such indication of agreement by the user, the provisioning Technology will complete installation of the Installable Software.

Cryptography

Content may contain encryption software. The country in which you are currently may have restrictions on the import, possession, and use, and/or re-export to another country, of encryption software. BEFORE using any encryption software, please check the country's laws, regulations and policies concerning the import, possession, or use, and re-export of encryption software, to see if this is permitted.

Java and all Java-based trademarks are trademarks of Oracle Corporation in the United States, other countries, or both.
diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/oops.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/oops.go deleted file mode 100644 index f15a9bae1..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/oops.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -func chkerr(e error) { - if e != nil { - panic(e) - } -} - -func chkcond(b bool) { - if !b { - panic("oops") - } -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/options.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/options.go deleted file mode 100644 index 156b7c0e5..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/options.go +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "crypto/tls" - "net/url" - "time" -) - -// MessageHandler is a callback type which can be set to be -// executed upon the arrival of messages published to topics -// to which the client is subscribed. -type MessageHandler func(*Client, Message) - -// ConnectionLostHandler is a callback type which can be set to be -// executed upon an unintended disconnection from the MQTT broker. -// Disconnects caused by calling Disconnect or ForceDisconnect will -// not cause an OnConnectionLost callback to execute. -type ConnectionLostHandler func(*Client, error) - -// OnConnectHandler is a callback that is called when the client -// state changes from unconnected/disconnected to connected. Both -// at initial connection and on reconnection. -type OnConnectHandler func(*Client) - -// ClientOptions contains configurable options for a Client. -type ClientOptions struct { - Servers []*url.URL - ClientID string - Username string - Password string - CleanSession bool - Order bool - WillEnabled bool - WillTopic string - WillPayload []byte - WillQos byte - WillRetained bool - ProtocolVersion uint - protocolVersionExplicit bool - TLSConfig tls.Config - KeepAlive time.Duration - ConnectTimeout time.Duration - MaxReconnectInterval time.Duration - AutoReconnect bool - Store Store - DefaultPublishHander MessageHandler - OnConnect OnConnectHandler - OnConnectionLost ConnectionLostHandler - WriteTimeout time.Duration -} - -// NewClientOptions will create a new ClientOptions type with some -// default values.
-// Port: 1883 -// CleanSession: True -// Order: True -// KeepAlive: 30 (seconds) -// ConnectTimeout: 30 (seconds) -// MaxReconnectInterval: 10 (minutes) -// AutoReconnect: True -func NewClientOptions() *ClientOptions { - o := &ClientOptions{ - Servers: nil, - ClientID: "", - Username: "", - Password: "", - CleanSession: true, - Order: true, - WillEnabled: false, - WillTopic: "", - WillPayload: nil, - WillQos: 0, - WillRetained: false, - ProtocolVersion: 0, - protocolVersionExplicit: false, - TLSConfig: tls.Config{}, - KeepAlive: 30 * time.Second, - ConnectTimeout: 30 * time.Second, - MaxReconnectInterval: 10 * time.Minute, - AutoReconnect: true, - Store: nil, - OnConnect: nil, - OnConnectionLost: DefaultConnectionLostHandler, - WriteTimeout: 0, // 0 represents timeout disabled - } - return o -} - -// AddBroker adds a broker URI to the list of brokers to be used. The format should be -// scheme://host:port -// Where "scheme" is one of "tcp", "ssl", or "ws", "host" is the ip-address (or hostname) -// and "port" is the port on which the broker is accepting connections. -func (o *ClientOptions) AddBroker(server string) *ClientOptions { - brokerURI, _ := url.Parse(server) - o.Servers = append(o.Servers, brokerURI) - return o -} - -// SetClientID will set the client id to be used by this client when -// connecting to the MQTT broker. According to the MQTT v3.1 specification, -// a client id must be no longer than 23 characters. -func (o *ClientOptions) SetClientID(id string) *ClientOptions { - o.ClientID = id - return o -} - -// SetUsername will set the username to be used by this client when connecting -// to the MQTT broker. Note: without the use of SSL/TLS, this information will -// be sent in plaintext across the wire. -func (o *ClientOptions) SetUsername(u string) *ClientOptions { - o.Username = u - return o -} - -// SetPassword will set the password to be used by this client when connecting -// to the MQTT broker. Note: without the use of SSL/TLS, this information will -// be sent in plaintext across the wire. -func (o *ClientOptions) SetPassword(p string) *ClientOptions { - o.Password = p - return o -} - -// SetCleanSession will set the "clean session" flag in the connect message -// when this client connects to an MQTT broker. By setting this flag, you are -// indicating that no messages saved by the broker for this client should be -// delivered. Any messages that were queued to be sent by this client before -// it previously disconnected, but were not, will not be sent upon connecting to the -// broker. -func (o *ClientOptions) SetCleanSession(clean bool) *ClientOptions { - o.CleanSession = clean - return o -} - -// SetOrderMatters will set the message routing to guarantee order within -// each QoS level. By default, this value is true. If set to false, -// this flag indicates that messages can be delivered asynchronously -// from the client to the application and possibly arrive out of order. -func (o *ClientOptions) SetOrderMatters(order bool) *ClientOptions { - o.Order = order - return o -} - -// SetTLSConfig will set an SSL/TLS configuration to be used when connecting -// to an MQTT broker. Please read the official Go documentation for more -// information. -func (o *ClientOptions) SetTLSConfig(t *tls.Config) *ClientOptions { - o.TLSConfig = *t - return o -} - -// SetStore will set the implementation of the Store interface -// used to provide message persistence in cases where QoS levels -// QoS_ONE or QoS_TWO are used.
If no store is provided, then the -// client will use MemoryStore by default. -func (o *ClientOptions) SetStore(s Store) *ClientOptions { - o.Store = s - return o -} - -// SetKeepAlive will set the amount of time that the client -// should wait before sending a PING request to the broker. This will -// allow the client to know that a connection has not been lost with the -// server. -func (o *ClientOptions) SetKeepAlive(k time.Duration) *ClientOptions { - o.KeepAlive = k - return o -} - -// SetProtocolVersion sets the MQTT version to be used to connect to the -// broker. Legitimate values are currently 3 (MQTT 3.1) and 4 (MQTT 3.1.1). -func (o *ClientOptions) SetProtocolVersion(pv uint) *ClientOptions { - if pv >= 3 && pv <= 4 { - o.ProtocolVersion = pv - o.protocolVersionExplicit = true - } - return o -} - -// UnsetWill will cause any set will message to be disregarded. -func (o *ClientOptions) UnsetWill() *ClientOptions { - o.WillEnabled = false - return o -} - -// SetWill accepts a string will message to be set. When the client connects, -// it will give this will message to the broker, which will then publish the -// provided payload (the will) to any clients that are subscribed to the provided -// topic. -func (o *ClientOptions) SetWill(topic string, payload string, qos byte, retained bool) *ClientOptions { - o.SetBinaryWill(topic, []byte(payload), qos, retained) - return o -} - -// SetBinaryWill accepts a []byte will message to be set. When the client connects, -// it will give this will message to the broker, which will then publish the -// provided payload (the will) to any clients that are subscribed to the provided -// topic. -func (o *ClientOptions) SetBinaryWill(topic string, payload []byte, qos byte, retained bool) *ClientOptions { - o.WillEnabled = true - o.WillTopic = topic - o.WillPayload = payload - o.WillQos = qos - o.WillRetained = retained - return o -} - -// SetDefaultPublishHandler sets the MessageHandler that will be called when a message -// is received that does not match any known subscriptions. -func (o *ClientOptions) SetDefaultPublishHandler(defaultHandler MessageHandler) *ClientOptions { - o.DefaultPublishHander = defaultHandler - return o -} - -// SetOnConnectHandler sets the function to be called when the client is connected. Both -// at initial connection time and upon automatic reconnect. -func (o *ClientOptions) SetOnConnectHandler(onConn OnConnectHandler) *ClientOptions { - o.OnConnect = onConn - return o -} - -// SetConnectionLostHandler will set the OnConnectionLost callback to be executed -// in the case where the client unexpectedly loses connection with the MQTT broker. -func (o *ClientOptions) SetConnectionLostHandler(onLost ConnectionLostHandler) *ClientOptions { - o.OnConnectionLost = onLost - return o -} - -// SetWriteTimeout puts a limit on how long an MQTT publish should block until it unblocks with a -// timeout error. A duration of 0 never times out. The default is 0 (disabled). -func (o *ClientOptions) SetWriteTimeout(t time.Duration) *ClientOptions { - o.WriteTimeout = t - return o -} - -// SetConnectTimeout limits how long the client will wait when trying to open a connection -// to an MQTT server before timing out and erroring the attempt. A duration of 0 never times out. -// Default 30 seconds. Currently only operational on TCP/TLS connections.
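Since each setter above returns the *ClientOptions receiver, configuration reads as a single chain. A hedged usage sketch (the broker address, client ID, and timeout values are placeholders, not taken from this repository):

```go
// Illustrative only: fluent configuration with the setters above.
opts := NewClientOptions().
	AddBroker("tcp://broker.example.com:1883"). // scheme://host:port
	SetClientID("example-client").
	SetCleanSession(true).
	SetKeepAlive(30 * time.Second).
	SetWriteTimeout(5 * time.Second).
	SetConnectionLostHandler(func(c *Client, err error) {
		// Runs only on unintended disconnects, per the docs above.
	})
_ = opts // would be passed to the client constructor
```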
-func (o *ClientOptions) SetConnectTimeout(t time.Duration) *ClientOptions { - o.ConnectTimeout = t - return o -} - -// SetMaxReconnectInterval sets the maximum time that will be waited between reconnection attempts -// when connection is lost -func (o *ClientOptions) SetMaxReconnectInterval(t time.Duration) *ClientOptions { - o.MaxReconnectInterval = t - return o -} - -// SetAutoReconnect sets whether the automatic reconnection logic should be used -// when the connection is lost, even if disabled the ConnectionLostHandler is still -// called -func (o *ClientOptions) SetAutoReconnect(a bool) *ClientOptions { - o.AutoReconnect = a - return o -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/connack.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/connack.go deleted file mode 100644 index 729133767..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/connack.go +++ /dev/null @@ -1,57 +0,0 @@ -package packets - -import ( - "bytes" - "fmt" - "github.com/pborman/uuid" - "io" -) - -//ConnackPacket is an internal representation of the fields of the -//Connack MQTT packet -type ConnackPacket struct { - FixedHeader - TopicNameCompression byte - ReturnCode byte - uuid uuid.UUID -} - -func (ca *ConnackPacket) String() string { - str := fmt.Sprintf("%s\n", ca.FixedHeader) - str += fmt.Sprintf("returncode: %d", ca.ReturnCode) - return str -} - -func (ca *ConnackPacket) Write(w io.Writer) error { - var body bytes.Buffer - var err error - - body.WriteByte(ca.TopicNameCompression) - body.WriteByte(ca.ReturnCode) - ca.FixedHeader.RemainingLength = 2 - packet := ca.FixedHeader.pack() - packet.Write(body.Bytes()) - _, err = packet.WriteTo(w) - - return err -} - -//Unpack decodes the details of a ControlPacket after the fixed -//header has been read -func (ca *ConnackPacket) Unpack(b io.Reader) { - ca.TopicNameCompression = decodeByte(b) - ca.ReturnCode = decodeByte(b) -} - -//Details returns a Details struct containing the Qos and -//MessageID of this ControlPacket -func (ca *ConnackPacket) Details() Details { - return Details{Qos: 0, MessageID: 0} -} - -//UUID returns the unique ID assigned to the ControlPacket when -//it was originally received. 
Note: this is not related to the -//MessageID field for MQTT packets -func (ca *ConnackPacket) UUID() uuid.UUID { - return ca.uuid -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/connect.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/connect.go deleted file mode 100644 index 283007725..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/connect.go +++ /dev/null @@ -1,128 +0,0 @@ -package packets - -import ( - "bytes" - "fmt" - "github.com/pborman/uuid" - "io" -) - -//ConnectPacket is an internal representation of the fields of the -//Connect MQTT packet -type ConnectPacket struct { - FixedHeader - ProtocolName string - ProtocolVersion byte - CleanSession bool - WillFlag bool - WillQos byte - WillRetain bool - UsernameFlag bool - PasswordFlag bool - ReservedBit byte - KeepaliveTimer uint16 - - ClientIdentifier string - WillTopic string - WillMessage []byte - Username string - Password []byte - uuid uuid.UUID -} - -func (c *ConnectPacket) String() string { - str := fmt.Sprintf("%s\n", c.FixedHeader) - str += fmt.Sprintf("protocolversion: %d protocolname: %s cleansession: %t willflag: %t WillQos: %d WillRetain: %t Usernameflag: %t Passwordflag: %t keepalivetimer: %d\nclientId: %s\nwilltopic: %s\nwillmessage: %s\nUsername: %s\nPassword: %s\n", c.ProtocolVersion, c.ProtocolName, c.CleanSession, c.WillFlag, c.WillQos, c.WillRetain, c.UsernameFlag, c.PasswordFlag, c.KeepaliveTimer, c.ClientIdentifier, c.WillTopic, c.WillMessage, c.Username, c.Password) - return str -} - -func (c *ConnectPacket) Write(w io.Writer) error { - var body bytes.Buffer - var err error - - body.Write(encodeString(c.ProtocolName)) - body.WriteByte(c.ProtocolVersion) - body.WriteByte(boolToByte(c.CleanSession)<<1 | boolToByte(c.WillFlag)<<2 | c.WillQos<<3 | boolToByte(c.WillRetain)<<5 | boolToByte(c.PasswordFlag)<<6 | boolToByte(c.UsernameFlag)<<7) - body.Write(encodeUint16(c.KeepaliveTimer)) - body.Write(encodeString(c.ClientIdentifier)) - if c.WillFlag { - body.Write(encodeString(c.WillTopic)) - body.Write(encodeBytes(c.WillMessage)) - } - if c.UsernameFlag { - body.Write(encodeString(c.Username)) - } - if c.PasswordFlag { - body.Write(encodeBytes(c.Password)) - } - c.FixedHeader.RemainingLength = body.Len() - packet := c.FixedHeader.pack() - packet.Write(body.Bytes()) - _, err = packet.WriteTo(w) - - return err -} - -//Unpack decodes the details of a ControlPacket after the fixed -//header has been read -func (c *ConnectPacket) Unpack(b io.Reader) { - c.ProtocolName = decodeString(b) - c.ProtocolVersion = decodeByte(b) - options := decodeByte(b) - c.ReservedBit = 1 & options - c.CleanSession = 1&(options>>1) > 0 - c.WillFlag = 1&(options>>2) > 0 - c.WillQos = 3 & (options >> 3) - c.WillRetain = 1&(options>>5) > 0 - c.PasswordFlag = 1&(options>>6) > 0 - c.UsernameFlag = 1&(options>>7) > 0 - c.KeepaliveTimer = decodeUint16(b) - c.ClientIdentifier = decodeString(b) - if c.WillFlag { - c.WillTopic = decodeString(b) - c.WillMessage = decodeBytes(b) - } - if c.UsernameFlag { - c.Username = decodeString(b) - } - if c.PasswordFlag { - c.Password = decodeBytes(b) - } -} - -//Validate performs validation of the fields of a Connect packet -func (c *ConnectPacket) Validate() byte { - if c.PasswordFlag && !c.UsernameFlag { - return ErrRefusedBadUsernameOrPassword - } - if c.ReservedBit != 0 { - //Bad reserved bit - return ErrProtocolViolation - } - if (c.ProtocolName == 
"MQIsdp" && c.ProtocolVersion != 3) || (c.ProtocolName == "MQTT" && c.ProtocolVersion != 4) { - //Mismatched or unsupported protocol version - return ErrRefusedBadProtocolVersion - } - if c.ProtocolName != "MQIsdp" && c.ProtocolName != "MQTT" { - //Bad protocol name - return ErrProtocolViolation - } - if len(c.ClientIdentifier) > 65535 || len(c.Username) > 65535 || len(c.Password) > 65535 { - //Bad size field - return ErrProtocolViolation - } - return Accepted -} - -//Details returns a Details struct containing the Qos and -//MessageID of this ControlPacket -func (c *ConnectPacket) Details() Details { - return Details{Qos: 0, MessageID: 0} -} - -//UUID returns the unique ID assigned to the ControlPacket when -//it was originally received. Note: this is not related to the -//MessageID field for MQTT packets -func (c *ConnectPacket) UUID() uuid.UUID { - return c.uuid -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/disconnect.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/disconnect.go deleted file mode 100644 index 2f005fb35..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/disconnect.go +++ /dev/null @@ -1,44 +0,0 @@ -package packets - -import ( - "fmt" - "github.com/pborman/uuid" - "io" -) - -//DisconnectPacket is an internal representation of the fields of the -//Disconnect MQTT packet -type DisconnectPacket struct { - FixedHeader - uuid uuid.UUID -} - -func (d *DisconnectPacket) String() string { - str := fmt.Sprintf("%s\n", d.FixedHeader) - return str -} - -func (d *DisconnectPacket) Write(w io.Writer) error { - packet := d.FixedHeader.pack() - _, err := packet.WriteTo(w) - - return err -} - -//Unpack decodes the details of a ControlPacket after the fixed -//header has been read -func (d *DisconnectPacket) Unpack(b io.Reader) { -} - -//Details returns a Details struct containing the Qos and -//MessageID of this ControlPacket -func (d *DisconnectPacket) Details() Details { - return Details{Qos: 0, MessageID: 0} -} - -//UUID returns the unique ID assigned to the ControlPacket when -//it was originally received. Note: this is not related to the -//MessageID field for MQTT packets -func (d *DisconnectPacket) UUID() uuid.UUID { - return d.uuid -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/packets.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/packets.go deleted file mode 100644 index 4cdf3b1f5..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/packets.go +++ /dev/null @@ -1,324 +0,0 @@ -package packets - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "github.com/pborman/uuid" - "io" -) - -//ControlPacket defines the interface for structs intended to hold -//decoded MQTT packets, either from being read or before being -//written -type ControlPacket interface { - Write(io.Writer) error - Unpack(io.Reader) - String() string - Details() Details - UUID() uuid.UUID -} - -//PacketNames maps the constants for each of the MQTT packet types -//to a string representation of their name. 
-var PacketNames = map[uint8]string{ - 1: "CONNECT", - 2: "CONNACK", - 3: "PUBLISH", - 4: "PUBACK", - 5: "PUBREC", - 6: "PUBREL", - 7: "PUBCOMP", - 8: "SUBSCRIBE", - 9: "SUBACK", - 10: "UNSUBSCRIBE", - 11: "UNSUBACK", - 12: "PINGREQ", - 13: "PINGRESP", - 14: "DISCONNECT", -} - -//Below are the constants assigned to each of the MQTT packet types -const ( - Connect = 1 - Connack = 2 - Publish = 3 - Puback = 4 - Pubrec = 5 - Pubrel = 6 - Pubcomp = 7 - Subscribe = 8 - Suback = 9 - Unsubscribe = 10 - Unsuback = 11 - Pingreq = 12 - Pingresp = 13 - Disconnect = 14 -) - -//Below are the const definitions for error codes returned by -//Connect() -const ( - Accepted = 0x00 - ErrRefusedBadProtocolVersion = 0x01 - ErrRefusedIDRejected = 0x02 - ErrRefusedServerUnavailable = 0x03 - ErrRefusedBadUsernameOrPassword = 0x04 - ErrRefusedNotAuthorised = 0x05 - ErrNetworkError = 0xFE - ErrProtocolViolation = 0xFF -) - -//ConnackReturnCodes is a map of the error code constants for Connect() -//to a string representation of the error -var ConnackReturnCodes = map[uint8]string{ - 0: "Connection Accepted", - 1: "Connection Refused: Bad Protocol Version", - 2: "Connection Refused: Client Identifier Rejected", - 3: "Connection Refused: Server Unavailable", - 4: "Connection Refused: Username or Password in unknown format", - 5: "Connection Refused: Not Authorised", - 254: "Connection Error", - 255: "Connection Refused: Protocol Violation", -} - -//ConnErrors is a map of the error code constants for Connect() -//to a Go error -var ConnErrors = map[byte]error{ - Accepted: nil, - ErrRefusedBadProtocolVersion: errors.New("Unacceptable protocol version"), - ErrRefusedIDRejected: errors.New("Identifier rejected"), - ErrRefusedServerUnavailable: errors.New("Server Unavailable"), - ErrRefusedBadUsernameOrPassword: errors.New("Bad user name or password"), - ErrRefusedNotAuthorised: errors.New("Not Authorized"), - ErrNetworkError: errors.New("Network Error"), - ErrProtocolViolation: errors.New("Protocol Violation"), -} - -//ReadPacket takes an instance of an io.Reader (such as net.Conn) and attempts -//to read an MQTT packet from the stream. It returns a ControlPacket -//representing the decoded MQTT packet and an error. One of the two return values will -//always be nil; a nil ControlPacket indicates an error occurred. -func ReadPacket(r io.Reader) (cp ControlPacket, err error) { - var fh FixedHeader - b := make([]byte, 1) - - _, err = io.ReadFull(r, b) - if err != nil { - return nil, err - } - fh.unpack(b[0], r) - cp = NewControlPacketWithHeader(fh) - if cp == nil { - return nil, errors.New("Bad data from client") - } - packetBytes := make([]byte, fh.RemainingLength) - _, err = io.ReadFull(r, packetBytes) - if err != nil { - return nil, err - } - cp.Unpack(bytes.NewBuffer(packetBytes)) - return cp, nil -} - -//NewControlPacket is used to create a new ControlPacket of the type specified -//by packetType; this is usually done by reference to the packet type constants -//defined in packets.go. The newly created ControlPacket is empty and a pointer -//is returned.
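The ConnErrors table above gives the direct translation from a CONNACK return code to a Go error. A hypothetical fragment, assuming ca is a *ConnackPacket previously obtained from ReadPacket:

```go
// Hypothetical: surface a broker's CONNACK refusal as an error.
if err := ConnErrors[ca.ReturnCode]; err != nil {
	return err // e.g. "Bad user name or password" for code 0x04
}
```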
-func NewControlPacket(packetType byte) (cp ControlPacket) { - switch packetType { - case Connect: - cp = &ConnectPacket{FixedHeader: FixedHeader{MessageType: Connect}, uuid: uuid.NewUUID()} - case Connack: - cp = &ConnackPacket{FixedHeader: FixedHeader{MessageType: Connack}, uuid: uuid.NewUUID()} - case Disconnect: - cp = &DisconnectPacket{FixedHeader: FixedHeader{MessageType: Disconnect}, uuid: uuid.NewUUID()} - case Publish: - cp = &PublishPacket{FixedHeader: FixedHeader{MessageType: Publish}, uuid: uuid.NewUUID()} - case Puback: - cp = &PubackPacket{FixedHeader: FixedHeader{MessageType: Puback}, uuid: uuid.NewUUID()} - case Pubrec: - cp = &PubrecPacket{FixedHeader: FixedHeader{MessageType: Pubrec}, uuid: uuid.NewUUID()} - case Pubrel: - cp = &PubrelPacket{FixedHeader: FixedHeader{MessageType: Pubrel, Qos: 1}, uuid: uuid.NewUUID()} - case Pubcomp: - cp = &PubcompPacket{FixedHeader: FixedHeader{MessageType: Pubcomp}, uuid: uuid.NewUUID()} - case Subscribe: - cp = &SubscribePacket{FixedHeader: FixedHeader{MessageType: Subscribe, Qos: 1}, uuid: uuid.NewUUID()} - case Suback: - cp = &SubackPacket{FixedHeader: FixedHeader{MessageType: Suback}, uuid: uuid.NewUUID()} - case Unsubscribe: - cp = &UnsubscribePacket{FixedHeader: FixedHeader{MessageType: Unsubscribe, Qos: 1}, uuid: uuid.NewUUID()} - case Unsuback: - cp = &UnsubackPacket{FixedHeader: FixedHeader{MessageType: Unsuback}, uuid: uuid.NewUUID()} - case Pingreq: - cp = &PingreqPacket{FixedHeader: FixedHeader{MessageType: Pingreq}, uuid: uuid.NewUUID()} - case Pingresp: - cp = &PingrespPacket{FixedHeader: FixedHeader{MessageType: Pingresp}, uuid: uuid.NewUUID()} - default: - return nil - } - return cp -} - -//NewControlPacketWithHeader is used to create a new ControlPacket of the type -//specified within the FixedHeader that is passed to the function. -//The newly created ControlPacket is empty and a pointer is returned. 
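NewControlPacket above and ReadPacket are mirror images, which is how the store tests earlier exercise persistence. A minimal round-trip sketch (it assumes this packets package plus "bytes" and "fmt"; PublishPacket itself is defined in publish.go elsewhere in this diff):

```go
// Round-trip sketch: build a PUBLISH, serialize it, decode it again.
pub := NewControlPacket(Publish).(*PublishPacket)
pub.Qos = 1
pub.TopicName = "a/b/c" // illustrative values
pub.MessageID = 42
pub.Payload = []byte{0x01}

var buf bytes.Buffer
if err := pub.Write(&buf); err != nil {
	panic(err) // sketch only
}
back, err := ReadPacket(&buf)
if err != nil {
	panic(err)
}
fmt.Println(back.String()) // prints the decoded packet's summary
```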
-func NewControlPacketWithHeader(fh FixedHeader) (cp ControlPacket) { - switch fh.MessageType { - case Connect: - cp = &ConnectPacket{FixedHeader: fh, uuid: uuid.NewUUID()} - case Connack: - cp = &ConnackPacket{FixedHeader: fh, uuid: uuid.NewUUID()} - case Disconnect: - cp = &DisconnectPacket{FixedHeader: fh, uuid: uuid.NewUUID()} - case Publish: - cp = &PublishPacket{FixedHeader: fh, uuid: uuid.NewUUID()} - case Puback: - cp = &PubackPacket{FixedHeader: fh, uuid: uuid.NewUUID()} - case Pubrec: - cp = &PubrecPacket{FixedHeader: fh, uuid: uuid.NewUUID()} - case Pubrel: - cp = &PubrelPacket{FixedHeader: fh, uuid: uuid.NewUUID()} - case Pubcomp: - cp = &PubcompPacket{FixedHeader: fh, uuid: uuid.NewUUID()} - case Subscribe: - cp = &SubscribePacket{FixedHeader: fh, uuid: uuid.NewUUID()} - case Suback: - cp = &SubackPacket{FixedHeader: fh, uuid: uuid.NewUUID()} - case Unsubscribe: - cp = &UnsubscribePacket{FixedHeader: fh, uuid: uuid.NewUUID()} - case Unsuback: - cp = &UnsubackPacket{FixedHeader: fh, uuid: uuid.NewUUID()} - case Pingreq: - cp = &PingreqPacket{FixedHeader: fh, uuid: uuid.NewUUID()} - case Pingresp: - cp = &PingrespPacket{FixedHeader: fh, uuid: uuid.NewUUID()} - default: - return nil - } - return cp -} - -//Details struct returned by the Details() function called on -//ControlPackets to present details of the Qos and MessageID -//of the ControlPacket -type Details struct { - Qos byte - MessageID uint16 -} - -//FixedHeader is a struct to hold the decoded information from -//the fixed header of an MQTT ControlPacket -type FixedHeader struct { - MessageType byte - Dup bool - Qos byte - Retain bool - RemainingLength int -} - -func (fh FixedHeader) String() string { - return fmt.Sprintf("%s: dup: %t qos: %d retain: %t rLength: %d", PacketNames[fh.MessageType], fh.Dup, fh.Qos, fh.Retain, fh.RemainingLength) -} - -func boolToByte(b bool) byte { - switch b { - case true: - return 1 - default: - return 0 - } -} - -func (fh *FixedHeader) pack() bytes.Buffer { - var header bytes.Buffer - header.WriteByte(fh.MessageType<<4 | boolToByte(fh.Dup)<<3 | fh.Qos<<1 | boolToByte(fh.Retain)) - header.Write(encodeLength(fh.RemainingLength)) - return header -} - -func (fh *FixedHeader) unpack(typeAndFlags byte, r io.Reader) { - fh.MessageType = typeAndFlags >> 4 - fh.Dup = (typeAndFlags>>3)&0x01 > 0 - fh.Qos = (typeAndFlags >> 1) & 0x03 - fh.Retain = typeAndFlags&0x01 > 0 - fh.RemainingLength = decodeLength(r) -} - -func decodeByte(b io.Reader) byte { - num := make([]byte, 1) - b.Read(num) - return num[0] -} - -func decodeUint16(b io.Reader) uint16 { - num := make([]byte, 2) - b.Read(num) - return binary.BigEndian.Uint16(num) -} - -func encodeUint16(num uint16) []byte { - bytes := make([]byte, 2) - binary.BigEndian.PutUint16(bytes, num) - return bytes -} - -func encodeString(field string) []byte { - fieldLength := make([]byte, 2) - binary.BigEndian.PutUint16(fieldLength, uint16(len(field))) - return append(fieldLength, []byte(field)...) -} - -func decodeString(b io.Reader) string { - fieldLength := decodeUint16(b) - field := make([]byte, fieldLength) - b.Read(field) - return string(field) -} - -func decodeBytes(b io.Reader) []byte { - fieldLength := decodeUint16(b) - field := make([]byte, fieldLength) - b.Read(field) - return field -} - -func encodeBytes(field []byte) []byte { - fieldLength := make([]byte, 2) - binary.BigEndian.PutUint16(fieldLength, uint16(len(field))) - return append(fieldLength, field...) 
-} - -func encodeLength(length int) []byte { - var encLength []byte - for { - digit := byte(length % 128) - length /= 128 - if length > 0 { - digit |= 0x80 - } - encLength = append(encLength, digit) - if length == 0 { - break - } - } - return encLength -} - -func decodeLength(r io.Reader) int { - var rLength uint32 - var multiplier uint32 - b := make([]byte, 1) - for { - io.ReadFull(r, b) - digit := b[0] - rLength |= uint32(digit&127) << multiplier - if (digit & 128) == 0 { - break - } - multiplier += 7 - } - return int(rLength) -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/packets_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/packets_test.go deleted file mode 100644 index 51d887d08..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/packets_test.go +++ /dev/null @@ -1,159 +0,0 @@ -package packets - -import ( - "bytes" - "testing" -) - -func TestPacketNames(t *testing.T) { - if PacketNames[1] != "CONNECT" { - t.Errorf("PacketNames[1] is %s, should be %s", PacketNames[1], "CONNECT") - } - if PacketNames[2] != "CONNACK" { - t.Errorf("PacketNames[2] is %s, should be %s", PacketNames[2], "CONNACK") - } - if PacketNames[3] != "PUBLISH" { - t.Errorf("PacketNames[3] is %s, should be %s", PacketNames[3], "PUBLISH") - } - if PacketNames[4] != "PUBACK" { - t.Errorf("PacketNames[4] is %s, should be %s", PacketNames[4], "PUBACK") - } - if PacketNames[5] != "PUBREC" { - t.Errorf("PacketNames[5] is %s, should be %s", PacketNames[5], "PUBREC") - } - if PacketNames[6] != "PUBREL" { - t.Errorf("PacketNames[6] is %s, should be %s", PacketNames[6], "PUBREL") - } - if PacketNames[7] != "PUBCOMP" { - t.Errorf("PacketNames[7] is %s, should be %s", PacketNames[7], "PUBCOMP") - } - if PacketNames[8] != "SUBSCRIBE" { - t.Errorf("PacketNames[8] is %s, should be %s", PacketNames[8], "SUBSCRIBE") - } - if PacketNames[9] != "SUBACK" { - t.Errorf("PacketNames[9] is %s, should be %s", PacketNames[9], "SUBACK") - } - if PacketNames[10] != "UNSUBSCRIBE" { - t.Errorf("PacketNames[10] is %s, should be %s", PacketNames[10], "UNSUBSCRIBE") - } - if PacketNames[11] != "UNSUBACK" { - t.Errorf("PacketNames[11] is %s, should be %s", PacketNames[11], "UNSUBACK") - } - if PacketNames[12] != "PINGREQ" { - t.Errorf("PacketNames[12] is %s, should be %s", PacketNames[12], "PINGREQ") - } - if PacketNames[13] != "PINGRESP" { - t.Errorf("PacketNames[13] is %s, should be %s", PacketNames[13], "PINGRESP") - } - if PacketNames[14] != "DISCONNECT" { - t.Errorf("PacketNames[14] is %s, should be %s", PacketNames[14], "DISCONNECT") - } -} - -func TestPacketConsts(t *testing.T) { - if Connect != 1 { - t.Errorf("Const for Connect is %d, should be %d", Connect, 1) - } - if Connack != 2 { - t.Errorf("Const for Connack is %d, should be %d", Connack, 2) - } - if Publish != 3 { - t.Errorf("Const for Publish is %d, should be %d", Publish, 3) - } - if Puback != 4 { - t.Errorf("Const for Puback is %d, should be %d", Puback, 4) - } - if Pubrec != 5 { - t.Errorf("Const for Pubrec is %d, should be %d", Pubrec, 5) - } - if Pubrel != 6 { - t.Errorf("Const for Pubrel is %d, should be %d", Pubrel, 6) - } - if Pubcomp != 7 { - t.Errorf("Const for Pubcomp is %d, should be %d", Pubcomp, 7) - } - if Subscribe != 8 { - t.Errorf("Const for Subscribe is %d, should be %d", Subscribe, 8) - } - if Suback != 9 { - t.Errorf("Const for Suback is %d, should be %d", Suback, 9) - } - if Unsubscribe != 10 { 
- t.Errorf("Const for Unsubscribe is %d, should be %d", Unsubscribe, 10) - } - if Unsuback != 11 { - t.Errorf("Const for Unsuback is %d, should be %d", Unsuback, 11) - } - if Pingreq != 12 { - t.Errorf("Const for Pingreq is %d, should be %d", Pingreq, 12) - } - if Pingresp != 13 { - t.Errorf("Const for Pingresp is %d, should be %d", Pingresp, 13) - } - if Disconnect != 14 { - t.Errorf("Const for Disconnect is %d, should be %d", Disconnect, 14) - } -} - -func TestConnackConsts(t *testing.T) { - if Accepted != 0x00 { - t.Errorf("Const for Accepted is %d, should be %d", Accepted, 0) - } - if ErrRefusedBadProtocolVersion != 0x01 { - t.Errorf("Const for RefusedBadProtocolVersion is %d, should be %d", ErrRefusedBadProtocolVersion, 1) - } - if ErrRefusedIDRejected != 0x02 { - t.Errorf("Const for RefusedIDRejected is %d, should be %d", ErrRefusedIDRejected, 2) - } - if ErrRefusedServerUnavailable != 0x03 { - t.Errorf("Const for RefusedServerUnavailable is %d, should be %d", ErrRefusedServerUnavailable, 3) - } - if ErrRefusedBadUsernameOrPassword != 0x04 { - t.Errorf("Const for RefusedBadUsernameOrPassword is %d, should be %d", ErrRefusedBadUsernameOrPassword, 4) - } - if ErrRefusedNotAuthorised != 0x05 { - t.Errorf("Const for RefusedNotAuthorised is %d, should be %d", ErrRefusedNotAuthorised, 5) - } -} - -func TestConnectPacket(t *testing.T) { - connectPacketBytes := bytes.NewBuffer([]byte{16, 52, 0, 4, 77, 81, 84, 84, 4, 204, 0, 0, 0, 0, 0, 4, 116, 101, 115, 116, 0, 12, 84, 101, 115, 116, 32, 80, 97, 121, 108, 111, 97, 100, 0, 8, 116, 101, 115, 116, 117, 115, 101, 114, 0, 8, 116, 101, 115, 116, 112, 97, 115, 115}) - packet, err := ReadPacket(connectPacketBytes) - if err != nil { - t.Fatalf("Error reading packet: %s", err.Error()) - } - cp := packet.(*ConnectPacket) - if cp.ProtocolName != "MQTT" { - t.Errorf("Connect Packet ProtocolName is %s, should be %s", cp.ProtocolName, "MQTT") - } - if cp.ProtocolVersion != 4 { - t.Errorf("Connect Packet ProtocolVersion is %d, should be %d", cp.ProtocolVersion, 4) - } - if cp.UsernameFlag != true { - t.Errorf("Connect Packet UsernameFlag is %t, should be %t", cp.UsernameFlag, true) - } - if cp.Username != "testuser" { - t.Errorf("Connect Packet Username is %s, should be %s", cp.Username, "testuser") - } - if cp.PasswordFlag != true { - t.Errorf("Connect Packet PasswordFlag is %t, should be %t", cp.PasswordFlag, true) - } - if string(cp.Password) != "testpass" { - t.Errorf("Connect Packet Password is %s, should be %s", string(cp.Password), "testpass") - } - if cp.WillFlag != true { - t.Errorf("Connect Packet WillFlag is %t, should be %t", cp.WillFlag, true) - } - if cp.WillTopic != "test" { - t.Errorf("Connect Packet WillTopic is %s, should be %s", cp.WillTopic, "test") - } - if cp.WillQos != 1 { - t.Errorf("Connect Packet WillQos is %d, should be %d", cp.WillQos, 1) - } - if cp.WillRetain != false { - t.Errorf("Connect Packet WillRetain is %t, should be %t", cp.WillRetain, false) - } - if string(cp.WillMessage) != "Test Payload" { - t.Errorf("Connect Packet WillMessage is %s, should be %s", string(cp.WillMessage), "Test Payload") - } -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pingreq.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pingreq.go deleted file mode 100644 index 216a5f8fc..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pingreq.go +++ /dev/null @@ -1,44 +0,0 @@ -package 
packets - -import ( - "fmt" - "github.com/pborman/uuid" - "io" -) - -//PingreqPacket is an internal representation of the fields of the -//Pingreq MQTT packet -type PingreqPacket struct { - FixedHeader - uuid uuid.UUID -} - -func (pr *PingreqPacket) String() string { - str := fmt.Sprintf("%s", pr.FixedHeader) - return str -} - -func (pr *PingreqPacket) Write(w io.Writer) error { - packet := pr.FixedHeader.pack() - _, err := packet.WriteTo(w) - - return err -} - -//Unpack decodes the details of a ControlPacket after the fixed -//header has been read -func (pr *PingreqPacket) Unpack(b io.Reader) { -} - -//Details returns a Details struct containing the Qos and -//MessageID of this ControlPacket -func (pr *PingreqPacket) Details() Details { - return Details{Qos: 0, MessageID: 0} -} - -//UUID returns the unique ID assigned to the ControlPacket when -//it was originally received. Note: this is not related to the -//MessageID field for MQTT packets -func (pr *PingreqPacket) UUID() uuid.UUID { - return pr.uuid -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pingresp.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pingresp.go deleted file mode 100644 index 4658def97..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pingresp.go +++ /dev/null @@ -1,44 +0,0 @@ -package packets - -import ( - "fmt" - "github.com/pborman/uuid" - "io" -) - -//PingrespPacket is an internal representation of the fields of the -//Pingresp MQTT packet -type PingrespPacket struct { - FixedHeader - uuid uuid.UUID -} - -func (pr *PingrespPacket) String() string { - str := fmt.Sprintf("%s", pr.FixedHeader) - return str -} - -func (pr *PingrespPacket) Write(w io.Writer) error { - packet := pr.FixedHeader.pack() - _, err := packet.WriteTo(w) - - return err -} - -//Unpack decodes the details of a ControlPacket after the fixed -//header has been read -func (pr *PingrespPacket) Unpack(b io.Reader) { -} - -//Details returns a Details struct containing the Qos and -//MessageID of this ControlPacket -func (pr *PingrespPacket) Details() Details { - return Details{Qos: 0, MessageID: 0} -} - -//UUID returns the unique ID assigned to the ControlPacket when -//it was originally received. 
Note: this is not related to the -//MessageID field for MQTT packets -func (pr *PingrespPacket) UUID() uuid.UUID { - return pr.uuid -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/puback.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/puback.go deleted file mode 100644 index a3fe5db31..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/puback.go +++ /dev/null @@ -1,50 +0,0 @@ -package packets - -import ( - "fmt" - "github.com/pborman/uuid" - "io" -) - -//PubackPacket is an internal representation of the fields of the -//Puback MQTT packet -type PubackPacket struct { - FixedHeader - MessageID uint16 - uuid uuid.UUID -} - -func (pa *PubackPacket) String() string { - str := fmt.Sprintf("%s\n", pa.FixedHeader) - str += fmt.Sprintf("messageID: %d", pa.MessageID) - return str -} - -func (pa *PubackPacket) Write(w io.Writer) error { - var err error - pa.FixedHeader.RemainingLength = 2 - packet := pa.FixedHeader.pack() - packet.Write(encodeUint16(pa.MessageID)) - _, err = packet.WriteTo(w) - - return err -} - -//Unpack decodes the details of a ControlPacket after the fixed -//header has been read -func (pa *PubackPacket) Unpack(b io.Reader) { - pa.MessageID = decodeUint16(b) -} - -//Details returns a Details struct containing the Qos and -//MessageID of this ControlPacket -func (pa *PubackPacket) Details() Details { - return Details{Qos: pa.Qos, MessageID: pa.MessageID} -} - -//UUID returns the unique ID assigned to the ControlPacket when -//it was originally received. Note: this is not related to the -//MessageID field for MQTT packets -func (pa *PubackPacket) UUID() uuid.UUID { - return pa.uuid -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pubcomp.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pubcomp.go deleted file mode 100644 index 0cd5c860c..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pubcomp.go +++ /dev/null @@ -1,50 +0,0 @@ -package packets - -import ( - "fmt" - "github.com/pborman/uuid" - "io" -) - -//PubcompPacket is an internal representation of the fields of the -//Pubcomp MQTT packet -type PubcompPacket struct { - FixedHeader - MessageID uint16 - uuid uuid.UUID -} - -func (pc *PubcompPacket) String() string { - str := fmt.Sprintf("%s\n", pc.FixedHeader) - str += fmt.Sprintf("MessageID: %d", pc.MessageID) - return str -} - -func (pc *PubcompPacket) Write(w io.Writer) error { - var err error - pc.FixedHeader.RemainingLength = 2 - packet := pc.FixedHeader.pack() - packet.Write(encodeUint16(pc.MessageID)) - _, err = packet.WriteTo(w) - - return err -} - -//Unpack decodes the details of a ControlPacket after the fixed -//header has been read -func (pc *PubcompPacket) Unpack(b io.Reader) { - pc.MessageID = decodeUint16(b) -} - -//Details returns a Details struct containing the Qos and -//MessageID of this ControlPacket -func (pc *PubcompPacket) Details() Details { - return Details{Qos: pc.Qos, MessageID: pc.MessageID} -} - -//UUID returns the unique ID assigned to the ControlPacket when -//it was originally received. 
Note: this is not related to the -//MessageID field for MQTT packets -func (pc *PubcompPacket) UUID() uuid.UUID { - return pc.uuid -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/publish.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/publish.go deleted file mode 100644 index cb886cf78..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/publish.go +++ /dev/null @@ -1,82 +0,0 @@ -package packets - -import ( - "bytes" - "fmt" - "github.com/pborman/uuid" - "io" -) - -//PublishPacket is an internal representation of the fields of the -//Publish MQTT packet -type PublishPacket struct { - FixedHeader - TopicName string - MessageID uint16 - Payload []byte - uuid uuid.UUID -} - -func (p *PublishPacket) String() string { - str := fmt.Sprintf("%s\n", p.FixedHeader) - str += fmt.Sprintf("topicName: %s MessageID: %d\n", p.TopicName, p.MessageID) - str += fmt.Sprintf("payload: %s\n", string(p.Payload)) - return str -} - -func (p *PublishPacket) Write(w io.Writer) error { - var body bytes.Buffer - var err error - - body.Write(encodeString(p.TopicName)) - if p.Qos > 0 { - body.Write(encodeUint16(p.MessageID)) - } - p.FixedHeader.RemainingLength = body.Len() + len(p.Payload) - packet := p.FixedHeader.pack() - packet.Write(body.Bytes()) - packet.Write(p.Payload) - _, err = w.Write(packet.Bytes()) - - return err -} - -//Unpack decodes the details of a ControlPacket after the fixed -//header has been read -func (p *PublishPacket) Unpack(b io.Reader) { - var payloadLength = p.FixedHeader.RemainingLength - p.TopicName = decodeString(b) - if p.Qos > 0 { - p.MessageID = decodeUint16(b) - payloadLength -= len(p.TopicName) + 4 - } else { - payloadLength -= len(p.TopicName) + 2 - } - p.Payload = make([]byte, payloadLength) - b.Read(p.Payload) -} - -//Copy creates a new PublishPacket with the same topic and payload -//but an empty fixed header, useful for when you want to deliver -//a message with different properties such as Qos but the same -//content -func (p *PublishPacket) Copy() *PublishPacket { - newP := NewControlPacket(Publish).(*PublishPacket) - newP.TopicName = p.TopicName - newP.Payload = p.Payload - - return newP -} - -//Details returns a Details struct containing the Qos and -//MessageID of this ControlPacket -func (p *PublishPacket) Details() Details { - return Details{Qos: p.Qos, MessageID: p.MessageID} -} - -//UUID returns the unique ID assigned to the ControlPacket when -//it was originally received. 
Note: this is not related to the -//MessageID field for MQTT packets -func (p *PublishPacket) UUID() uuid.UUID { - return p.uuid -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pubrec.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pubrec.go deleted file mode 100644 index b83914bc9..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pubrec.go +++ /dev/null @@ -1,50 +0,0 @@ -package packets - -import ( - "fmt" - "github.com/pborman/uuid" - "io" -) - -//PubrecPacket is an internal representation of the fields of the -//Pubrec MQTT packet -type PubrecPacket struct { - FixedHeader - MessageID uint16 - uuid uuid.UUID -} - -func (pr *PubrecPacket) String() string { - str := fmt.Sprintf("%s\n", pr.FixedHeader) - str += fmt.Sprintf("MessageID: %d", pr.MessageID) - return str -} - -func (pr *PubrecPacket) Write(w io.Writer) error { - var err error - pr.FixedHeader.RemainingLength = 2 - packet := pr.FixedHeader.pack() - packet.Write(encodeUint16(pr.MessageID)) - _, err = packet.WriteTo(w) - - return err -} - -//Unpack decodes the details of a ControlPacket after the fixed -//header has been read -func (pr *PubrecPacket) Unpack(b io.Reader) { - pr.MessageID = decodeUint16(b) -} - -//Details returns a Details struct containing the Qos and -//MessageID of this ControlPacket -func (pr *PubrecPacket) Details() Details { - return Details{Qos: pr.Qos, MessageID: pr.MessageID} -} - -//UUID returns the unique ID assigned to the ControlPacket when -//it was originally received. Note: this is not related to the -//MessageID field for MQTT packets -func (pr *PubrecPacket) UUID() uuid.UUID { - return pr.uuid -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pubrel.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pubrel.go deleted file mode 100644 index 14bfffdda..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pubrel.go +++ /dev/null @@ -1,50 +0,0 @@ -package packets - -import ( - "fmt" - "github.com/pborman/uuid" - "io" -) - -//PubrelPacket is an internal representation of the fields of the -//Pubrel MQTT packet -type PubrelPacket struct { - FixedHeader - MessageID uint16 - uuid uuid.UUID -} - -func (pr *PubrelPacket) String() string { - str := fmt.Sprintf("%s\n", pr.FixedHeader) - str += fmt.Sprintf("MessageID: %d", pr.MessageID) - return str -} - -func (pr *PubrelPacket) Write(w io.Writer) error { - var err error - pr.FixedHeader.RemainingLength = 2 - packet := pr.FixedHeader.pack() - packet.Write(encodeUint16(pr.MessageID)) - _, err = packet.WriteTo(w) - - return err -} - -//Unpack decodes the details of a ControlPacket after the fixed -//header has been read -func (pr *PubrelPacket) Unpack(b io.Reader) { - pr.MessageID = decodeUint16(b) -} - -//Details returns a Details struct containing the Qos and -//MessageID of this ControlPacket -func (pr *PubrelPacket) Details() Details { - return Details{Qos: pr.Qos, MessageID: pr.MessageID} -} - -//UUID returns the unique ID assigned to the ControlPacket when -//it was originally received. 
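
The PublishPacket just above is the usual entry point into this packets API: NewControlPacket allocates it (as seen in Copy), Write serialises it, and ReadPacket (exercised in the test file earlier in this diff) parses it back. A minimal round-trip sketch using only functions visible in this diff; topic, QoS and payload values are illustrative:

package main

import (
	"bytes"
	"fmt"

	"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
)

func main() {
	// Build a PUBLISH packet via the constructor used in Copy() above.
	pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
	pub.TopicName = "sensors/temp"
	pub.Qos = 1 // QoS > 0 means Write also emits the MessageID
	pub.MessageID = 42
	pub.Payload = []byte("21.5")

	// Write serialises fixed header, topic, message ID and payload.
	var buf bytes.Buffer
	if err := pub.Write(&buf); err != nil {
		panic(err)
	}

	// ReadPacket decodes the fixed header and dispatches to Unpack.
	cp, err := packets.ReadPacket(&buf)
	if err != nil {
		panic(err)
	}
	back := cp.(*packets.PublishPacket)
	fmt.Println(back.TopicName, back.MessageID, string(back.Payload))
}
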
Note: this is not related to the -//MessageID field for MQTT packets -func (pr *PubrelPacket) UUID() uuid.UUID { - return pr.uuid -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/suback.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/suback.go deleted file mode 100644 index 0bd3b665d..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/suback.go +++ /dev/null @@ -1,58 +0,0 @@ -package packets - -import ( - "bytes" - "fmt" - "github.com/pborman/uuid" - "io" -) - -//SubackPacket is an internal representation of the fields of the -//Suback MQTT packet -type SubackPacket struct { - FixedHeader - MessageID uint16 - GrantedQoss []byte - uuid uuid.UUID -} - -func (sa *SubackPacket) String() string { - str := fmt.Sprintf("%s\n", sa.FixedHeader) - str += fmt.Sprintf("MessageID: %d", sa.MessageID) - return str -} - -func (sa *SubackPacket) Write(w io.Writer) error { - var body bytes.Buffer - var err error - body.Write(encodeUint16(sa.MessageID)) - body.Write(sa.GrantedQoss) - sa.FixedHeader.RemainingLength = body.Len() - packet := sa.FixedHeader.pack() - packet.Write(body.Bytes()) - _, err = packet.WriteTo(w) - - return err -} - -//Unpack decodes the details of a ControlPacket after the fixed -//header has been read -func (sa *SubackPacket) Unpack(b io.Reader) { - var qosBuffer bytes.Buffer - sa.MessageID = decodeUint16(b) - qosBuffer.ReadFrom(b) - sa.GrantedQoss = qosBuffer.Bytes() -} - -//Details returns a Details struct containing the Qos and -//MessageID of this ControlPacket -func (sa *SubackPacket) Details() Details { - return Details{Qos: 0, MessageID: sa.MessageID} -} - -//UUID returns the unique ID assigned to the ControlPacket when -//it was originally received. 
Note: this is not related to the -//MessageID field for MQTT packets -func (sa *SubackPacket) UUID() uuid.UUID { - return sa.uuid -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/subscribe.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/subscribe.go deleted file mode 100644 index 0aff19aac..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/subscribe.go +++ /dev/null @@ -1,68 +0,0 @@ -package packets - -import ( - "bytes" - "fmt" - "github.com/pborman/uuid" - "io" -) - -//SubscribePacket is an internal representation of the fields of the -//Subscribe MQTT packet -type SubscribePacket struct { - FixedHeader - MessageID uint16 - Topics []string - Qoss []byte - uuid uuid.UUID -} - -func (s *SubscribePacket) String() string { - str := fmt.Sprintf("%s\n", s.FixedHeader) - str += fmt.Sprintf("MessageID: %d topics: %s", s.MessageID, s.Topics) - return str -} - -func (s *SubscribePacket) Write(w io.Writer) error { - var body bytes.Buffer - var err error - - body.Write(encodeUint16(s.MessageID)) - for i, topic := range s.Topics { - body.Write(encodeString(topic)) - body.WriteByte(s.Qoss[i]) - } - s.FixedHeader.RemainingLength = body.Len() - packet := s.FixedHeader.pack() - packet.Write(body.Bytes()) - _, err = packet.WriteTo(w) - - return err -} - -//Unpack decodes the details of a ControlPacket after the fixed -//header has been read -func (s *SubscribePacket) Unpack(b io.Reader) { - s.MessageID = decodeUint16(b) - payloadLength := s.FixedHeader.RemainingLength - 2 - for payloadLength > 0 { - topic := decodeString(b) - s.Topics = append(s.Topics, topic) - qos := decodeByte(b) - s.Qoss = append(s.Qoss, qos) - payloadLength -= 2 + len(topic) + 1 //2 bytes of string length, plus string, plus 1 byte for Qos - } -} - -//Details returns a Details struct containing the Qos and -//MessageID of this ControlPacket -func (s *SubscribePacket) Details() Details { - return Details{Qos: 1, MessageID: s.MessageID} -} - -//UUID returns the unique ID assigned to the ControlPacket when -//it was originally received. 
Note: this is not related to the -//MessageID field for MQTT packets -func (s *SubscribePacket) UUID() uuid.UUID { - return s.uuid -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/unsuback.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/unsuback.go deleted file mode 100644 index ef67734f2..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/unsuback.go +++ /dev/null @@ -1,50 +0,0 @@ -package packets - -import ( - "fmt" - "github.com/pborman/uuid" - "io" -) - -//UnsubackPacket is an internal representation of the fields of the -//Unsuback MQTT packet -type UnsubackPacket struct { - FixedHeader - MessageID uint16 - uuid uuid.UUID -} - -func (ua *UnsubackPacket) String() string { - str := fmt.Sprintf("%s\n", ua.FixedHeader) - str += fmt.Sprintf("MessageID: %d", ua.MessageID) - return str -} - -func (ua *UnsubackPacket) Write(w io.Writer) error { - var err error - ua.FixedHeader.RemainingLength = 2 - packet := ua.FixedHeader.pack() - packet.Write(encodeUint16(ua.MessageID)) - _, err = packet.WriteTo(w) - - return err -} - -//Unpack decodes the details of a ControlPacket after the fixed -//header has been read -func (ua *UnsubackPacket) Unpack(b io.Reader) { - ua.MessageID = decodeUint16(b) -} - -//Details returns a Details struct containing the Qos and -//MessageID of this ControlPacket -func (ua *UnsubackPacket) Details() Details { - return Details{Qos: 0, MessageID: ua.MessageID} -} - -//UUID returns the unique ID assigned to the ControlPacket when -//it was originally received. Note: this is not related to the -//MessageID field for MQTT packets -func (ua *UnsubackPacket) UUID() uuid.UUID { - return ua.uuid -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/unsubscribe.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/unsubscribe.go deleted file mode 100644 index d7ee03cfc..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/unsubscribe.go +++ /dev/null @@ -1,61 +0,0 @@ -package packets - -import ( - "bytes" - "fmt" - "github.com/pborman/uuid" - "io" -) - -//UnsubscribePacket is an internal representation of the fields of the -//Unsubscribe MQTT packet -type UnsubscribePacket struct { - FixedHeader - MessageID uint16 - Topics []string - uuid uuid.UUID -} - -func (u *UnsubscribePacket) String() string { - str := fmt.Sprintf("%s\n", u.FixedHeader) - str += fmt.Sprintf("MessageID: %d", u.MessageID) - return str -} - -func (u *UnsubscribePacket) Write(w io.Writer) error { - var body bytes.Buffer - var err error - body.Write(encodeUint16(u.MessageID)) - for _, topic := range u.Topics { - body.Write(encodeString(topic)) - } - u.FixedHeader.RemainingLength = body.Len() - packet := u.FixedHeader.pack() - packet.Write(body.Bytes()) - _, err = packet.WriteTo(w) - - return err -} - -//Unpack decodes the details of a ControlPacket after the fixed -//header has been read -func (u *UnsubscribePacket) Unpack(b io.Reader) { - u.MessageID = decodeUint16(b) - var topic string - for topic = decodeString(b); topic != ""; topic = decodeString(b) { - u.Topics = append(u.Topics, topic) - } -} - -//Details returns a Details struct containing the Qos and -//MessageID of this ControlPacket -func (u *UnsubscribePacket) Details() Details { - return Details{Qos: 1, MessageID: u.MessageID} -} - -//UUID 
returns the unique ID assigned to the ControlPacket when -//it was originally received. Note: this is not related to the -//MessageID field for MQTT packets -func (u *UnsubscribePacket) UUID() uuid.UUID { - return u.uuid -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/ping.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/ping.go deleted file mode 100644 index 1ccd1ec89..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/ping.go +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "errors" - "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets" - "sync" - "time" -) - -type lastcontact struct { - sync.Mutex - lasttime time.Time -} - -func (l *lastcontact) update() { - l.Lock() - defer l.Unlock() - l.lasttime = time.Now() - -} - -func (l *lastcontact) get() time.Time { - l.Lock() - defer l.Unlock() - return l.lasttime -} - -func keepalive(c *Client) { - DEBUG.Println(PNG, "keepalive starting") - c.pingOutstanding = false - - for { - select { - case <-c.stop: - DEBUG.Println(PNG, "keepalive stopped") - c.workers.Done() - return - default: - last := uint(time.Since(c.lastContact.get()).Seconds()) - //DEBUG.Printf("%s last contact: %d (timeout: %d)", PNG, last, uint(c.options.KeepAlive.Seconds())) - if last > uint(c.options.KeepAlive.Seconds()) { - if !c.pingOutstanding { - DEBUG.Println(PNG, "keepalive sending ping") - ping := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket) - //We don't want to wait behind large messages being sent, the Write call - //will block until it it able to send the packet. - ping.Write(c.conn) - c.pingOutstanding = true - } else { - CRITICAL.Println(PNG, "pingresp not received, disconnecting") - c.workers.Done() - c.internalConnLost(errors.New("pingresp not received, disconnecting")) - return - } - } - time.Sleep(1 * time.Second) - } - } -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/router.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/router.go deleted file mode 100644 index 8e8172cb5..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/router.go +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "container/list" - "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets" - "strings" - "sync" -) - -// route is a type which associates MQTT Topic strings with a -// callback to be executed upon the arrival of a message associated -// with a subscription to that topic. 
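
The encodeLength and decodeLength helpers at the top of this diff implement MQTT's variable-length "remaining length" scheme: seven payload bits per byte, with the high bit set while more bytes follow. A standalone round-trip sketch of the same scheme, with local copies since the vendored helpers are unexported; the sample lengths are the interesting boundary values:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// encodeLength mirrors the unexported helper above: 7 bits per byte,
// continuation flag in the high bit.
func encodeLength(length int) []byte {
	var enc []byte
	for {
		digit := byte(length % 128)
		length /= 128
		if length > 0 {
			digit |= 0x80
		}
		enc = append(enc, digit)
		if length == 0 {
			break
		}
	}
	return enc
}

// decodeLength reads bytes until one arrives without the continuation bit.
func decodeLength(r io.Reader) int {
	var value, shift uint32
	b := make([]byte, 1)
	for {
		io.ReadFull(r, b)
		value |= uint32(b[0]&127) << shift
		if b[0]&128 == 0 {
			break
		}
		shift += 7
	}
	return int(value)
}

func main() {
	// 268435455 is the MQTT maximum: 0xFF 0xFF 0xFF 0x7F on the wire.
	for _, n := range []int{0, 127, 128, 16383, 268435455} {
		enc := encodeLength(n)
		dec := decodeLength(bytes.NewReader(enc))
		fmt.Printf("%d -> % x -> %d\n", n, enc, dec)
	}
}
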
-type route struct { - topic string - callback MessageHandler -} - -// match takes a slice of strings which represent the route being tested having been split on '/' -// separators, and a slice of strings representing the topic string in the published message, similarly -// split. -// The function determines if the topic string matches the route according to the MQTT topic rules -// and returns a boolean of the outcome -func match(route []string, topic []string) bool { - if len(route) == 0 { - if len(topic) == 0 { - return true - } - return false - } - - if len(topic) == 0 { - if route[0] == "#" { - return true - } - return false - } - - if route[0] == "#" { - return true - } - - if (route[0] == "+") || (route[0] == topic[0]) { - return match(route[1:], topic[1:]) - } - - return false -} - -func routeIncludesTopic(route, topic string) bool { - return match(strings.Split(route, "/"), strings.Split(topic, "/")) -} - -// match takes the topic string of the published message and does a basic compare to the -// string of the current Route, if they match it returns true -func (r *route) match(topic string) bool { - return r.topic == topic || routeIncludesTopic(r.topic, topic) -} - -type router struct { - sync.RWMutex - routes *list.List - defaultHandler MessageHandler - messages chan *packets.PublishPacket - stop chan bool -} - -// newRouter returns a new instance of a Router and channel which can be used to tell the Router -// to stop -func newRouter() (*router, chan bool) { - router := &router{routes: list.New(), messages: make(chan *packets.PublishPacket), stop: make(chan bool)} - stop := router.stop - return router, stop -} - -// addRoute takes a topic string and MessageHandler callback. It looks in the current list of -// routes to see if there is already a matching Route. If there is it replaces the current -// callback with the new one. If not it add a new entry to the list of Routes. -func (r *router) addRoute(topic string, callback MessageHandler) { - r.Lock() - defer r.Unlock() - for e := r.routes.Front(); e != nil; e = e.Next() { - if e.Value.(*route).match(topic) { - r := e.Value.(*route) - r.callback = callback - return - } - } - r.routes.PushBack(&route{topic: topic, callback: callback}) -} - -// deleteRoute takes a route string, looks for a matching Route in the list of Routes. If -// found it removes the Route from the list. -func (r *router) deleteRoute(topic string) { - r.Lock() - defer r.Unlock() - for e := r.routes.Front(); e != nil; e = e.Next() { - if e.Value.(*route).match(topic) { - r.routes.Remove(e) - return - } - } -} - -// setDefaultHandler assigns a default callback that will be called if no matching Route -// is found for an incoming Publish. -func (r *router) setDefaultHandler(handler MessageHandler) { - r.defaultHandler = handler -} - -// matchAndDispatch takes a channel of Message pointers as input and starts a go routine that -// takes messages off the channel, matches them against the internal route list and calls the -// associated callback (or the defaultHandler, if one exists and no other route matched). If -// anything is sent down the stop channel the function will end. 
-func (r *router) matchAndDispatch(messages <-chan *packets.PublishPacket, order bool, client *Client) { - go func() { - for { - select { - case message := <-messages: - sent := false - r.RLock() - for e := r.routes.Front(); e != nil; e = e.Next() { - if e.Value.(*route).match(message.TopicName) { - if order { - r.RUnlock() - e.Value.(*route).callback(client, messageFromPublish(message)) - r.RLock() - } else { - go e.Value.(*route).callback(client, messageFromPublish(message)) - } - sent = true - } - } - r.RUnlock() - if !sent && r.defaultHandler != nil { - if order { - r.RLock() - r.defaultHandler(client, messageFromPublish(message)) - r.RUnlock() - } else { - go r.defaultHandler(client, messageFromPublish(message)) - } - } - case <-r.stop: - return - } - } - }() -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/bug-ping.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/bug-ping.go deleted file mode 100644 index 4e1f3065b..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/bug-ping.go +++ /dev/null @@ -1,18 +0,0 @@ -package main - -import ( - "time" - - MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" -) - -func main() { - opts := MQTT.NewClientOptions().AddBroker("tcp://localhost:1883") - c := MQTT.NewClient(opts) - if token := c.Connect(); token.Wait() && token.Error() != nil { - panic(token.Error()) - } - for { - time.Sleep(1 * time.Second) - } -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/build.sh b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/build.sh deleted file mode 100644 index 11c5a6bbd..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/build.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh - -go clean - -for file in *.go -do - echo -n "Compiling $file ..." - go build "$file" - echo " done." -done diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/close_bug.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/close_bug.go deleted file mode 100644 index 8fa0035df..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/close_bug.go +++ /dev/null @@ -1,23 +0,0 @@ -package main - -import ( - "fmt" - "time" - - MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" -) - -func main() { - opts := MQTT.NewClientOptions().AddBroker("tcp://localhost:1883") - opts.SetCleanSession(true) - - c := MQTT.NewClient(opts) - if token := c.Connect(); token.Wait() && token.Error() != nil { - panic(token.Error()) - } - fmt.Println("plz mosquitto goes down now") - time.Sleep(5 * time.Second) - - c.Disconnect(200) - time.Sleep(5 * time.Second) -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/custom_store.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/custom_store.go deleted file mode 100644 index 19c2a28b6..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/custom_store.go +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. 
This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -// This demonstrates how to implement your own Store interface and provide -// it to the go-mqtt client. - -package main - -import ( - "fmt" - "time" - - MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" - "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets" -) - -// This NoOpStore type implements the go-mqtt/Store interface, which -// allows it to be used by the go-mqtt client library. However, it is -// highly recommended that you do not use this NoOpStore in production, -// because it will NOT provide any sort of guaruntee of message delivery. -type NoOpStore struct { - // Contain nothing -} - -func (store *NoOpStore) Open() { - // Do nothing -} - -func (store *NoOpStore) Put(string, packets.ControlPacket) { - // Do nothing -} - -func (store *NoOpStore) Get(string) packets.ControlPacket { - // Do nothing - return nil -} - -func (store *NoOpStore) Del(string) { - // Do nothing -} - -func (store *NoOpStore) All() []string { - return nil -} - -func (store *NoOpStore) Close() { - // Do Nothing -} - -func (store *NoOpStore) Reset() { - // Do Nothing -} - -func main() { - myNoOpStore := &NoOpStore{} - - opts := MQTT.NewClientOptions() - opts.AddBroker("tcp://iot.eclipse.org:1883") - opts.SetClientID("custom-store") - opts.SetStore(myNoOpStore) - - var callback MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) { - fmt.Printf("TOPIC: %s\n", msg.Topic()) - fmt.Printf("MSG: %s\n", msg.Payload()) - } - - c := MQTT.NewClient(opts) - if token := c.Connect(); token.Wait() && token.Error() != nil { - panic(token.Error()) - } - - c.Subscribe("/go-mqtt/sample", 0, callback) - - for i := 0; i < 5; i++ { - text := fmt.Sprintf("this is msg #%d!", i) - token := c.Publish("/go-mqtt/sample", 0, false, text) - token.Wait() - } - - for i := 1; i < 5; i++ { - time.Sleep(1 * time.Second) - } - - c.Disconnect(250) -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/mosquitto.conf b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/mosquitto.conf deleted file mode 100644 index 4c54bc201..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/mosquitto.conf +++ /dev/null @@ -1,745 +0,0 @@ -# Config file for mosquitto -# -# See mosquitto.conf(5) for more information. -# -# Default values are shown, uncomment to change. -# -# Use the # character to indicate a comment, but only if it is the -# very first character on the line. - -# ================================================================= -# General configuration -# ================================================================= - -# Time in seconds to wait before resending an outgoing QoS=1 or -# QoS=2 message. -#retry_interval 20 - -# Time in seconds between updates of the $SYS tree. -# Set to 0 to disable the publishing of the $SYS tree. -#sys_interval 10 - -# Time in seconds between cleaning the internal message store of -# unreferenced messages. Lower values will result in lower memory -# usage but more processor time, higher values will have the -# opposite effect. 
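
The NoOpStore above satisfies the client's Store interface while deliberately persisting nothing. For contrast, here is a minimal map-backed variant of the same seven-method interface. The method set is taken from the sample itself; the mutex and map behaviour are illustrative assumptions, not the library's own memory store:

package main

import (
	"fmt"
	"sync"

	"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
)

// MemStore keeps in-flight packets in a map guarded by a mutex.
type MemStore struct {
	mu sync.RWMutex
	m  map[string]packets.ControlPacket
}

func (s *MemStore) Open() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.m = make(map[string]packets.ControlPacket)
}

func (s *MemStore) Put(key string, cp packets.ControlPacket) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.m[key] = cp
}

func (s *MemStore) Get(key string) packets.ControlPacket {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.m[key]
}

func (s *MemStore) Del(key string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.m, key)
}

func (s *MemStore) All() []string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	keys := make([]string, 0, len(s.m))
	for k := range s.m {
		keys = append(keys, k)
	}
	return keys
}

func (s *MemStore) Close() {} // nothing to release

func (s *MemStore) Reset() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.m = make(map[string]packets.ControlPacket)
}

func main() {
	s := &MemStore{}
	s.Open()
	s.Put("i.1", packets.NewControlPacket(packets.Publish))
	fmt.Println(len(s.All())) // 1
}

It would be wired in exactly like the sample: opts.SetStore(&MemStore{}).
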
-# Setting a value of 0 means the unreferenced messages will be -# disposed of as quickly as possible. -#store_clean_interval 10 - -# Write process id to a file. Default is a blank string which means -# a pid file shouldn't be written. -# This should be set to /var/run/mosquitto.pid if mosquitto is -# being run automatically on boot with an init script and -# start-stop-daemon or similar. -#pid_file - -# When run as root, drop privileges to this user and its primary -# group. -# Leave blank to stay as root, but this is not recommended. -# If run as a non-root user, this setting has no effect. -# Note that on Windows this has no effect and so mosquitto should -# be started by the user you wish it to run as. -#user mosquitto - -# The maximum number of QoS 1 and 2 messages currently inflight per -# client. -# This includes messages that are partway through handshakes and -# those that are being retried. Defaults to 20. Set to 0 for no -# maximum. Setting to 1 will guarantee in-order delivery of QoS 1 -# and 2 messages. -#max_inflight_messages 20 - -# The maximum number of QoS 1 and 2 messages to hold in a queue -# above those that are currently in-flight. Defaults to 100. Set -# to 0 for no maximum (not recommended). -# See also queue_qos0_messages. -#max_queued_messages 100 - -# Set to true to queue messages with QoS 0 when a persistent client is -# disconnected. These messages are included in the limit imposed by -# max_queued_messages. -# Defaults to false. -# This is a non-standard option for the MQTT v3.1 spec but is allowed in -# v3.1.1. -#queue_qos0_messages false - -# This option sets the maximum publish payload size that the broker will allow. -# Received messages that exceed this size will not be accepted by the broker. -# The default value is 0, which means that all valid MQTT messages are -# accepted. MQTT imposes a maximum payload size of 268435455 bytes. -#message_size_limit 0 - -# This option controls whether a client is allowed to connect with a zero -# length client id or not. This option only affects clients using MQTT v3.1.1 -# and later. If set to false, clients connecting with a zero length client id -# are disconnected. If set to true, clients will be allocated a client id by -# the broker. This means it is only useful for clients with clean session set -# to true. -#allow_zero_length_clientid true - -# If allow_zero_length_clientid is true, this option allows you to set a prefix -# to automatically generated client ids to aid visibility in logs. -#auto_id_prefix - -# This option allows persistent clients (those with clean session set to false) -# to be removed if they do not reconnect within a certain time frame. -# -# This is a non-standard option in MQTT V3.1 but allowed in MQTT v3.1.1. -# -# Badly designed clients may set clean session to false whilst using a randomly -# generated client id. This leads to persistent clients that will never -# reconnect. This option allows these clients to be removed. -# -# The expiration period should be an integer followed by one of d w m y for -# day, week, month and year respectively. For example -# -# persistent_client_expiration 2m -# persistent_client_expiration 14d -# persistent_client_expiration 1y -# -# The default if not set is to never expire persistent clients. -#persistent_client_expiration - -# If a client is subscribed to multiple subscriptions that overlap, e.g. 
foo/# -# and foo/+/baz , then MQTT expects that when the broker receives a message on -# a topic that matches both subscriptions, such as foo/bar/baz, then the client -# should only receive the message once. -# Mosquitto keeps track of which clients a message has been sent to in order to -# meet this requirement. The allow_duplicate_messages option allows this -# behaviour to be disabled, which may be useful if you have a large number of -# clients subscribed to the same set of topics and are very concerned about -# minimising memory usage. -# It can be safely set to true if you know in advance that your clients will -# never have overlapping subscriptions, otherwise your clients must be able to -# correctly deal with duplicate messages even when then have QoS=2. -#allow_duplicate_messages false - -# The MQTT specification requires that the QoS of a message delivered to a -# subscriber is never upgraded to match the QoS of the subscription. Enabling -# this option changes this behaviour. If upgrade_outgoing_qos is set true, -# messages sent to a subscriber will always match the QoS of its subscription. -# This is a non-standard option explicitly disallowed by the spec. -#upgrade_outgoing_qos false - -# ================================================================= -# Default listener -# ================================================================= - -# IP address/hostname to bind the default listener to. If not -# given, the default listener will not be bound to a specific -# address and so will be accessible to all network interfaces. -# bind_address ip-address/host name -#bind_address - -# Port to use for the default listener. -#port 1883 - -# The maximum number of client connections to allow. This is -# a per listener setting. -# Default is -1, which means unlimited connections. -# Note that other process limits mean that unlimited connections -# are not really possible. Typically the default maximum number of -# connections possible is around 1024. -#max_connections -1 - -# ----------------------------------------------------------------- -# Certificate based SSL/TLS support -# ----------------------------------------------------------------- -# The following options can be used to enable SSL/TLS support for -# this listener. Note that the recommended port for MQTT over TLS -# is 8883, but this must be set manually. -# -# See also the mosquitto-tls man page. - -# At least one of cafile or capath must be defined. They both -# define methods of accessing the PEM encoded Certificate -# Authority certificates that have signed your server certificate -# and that you wish to trust. -# cafile defines the path to a file containing the CA certificates. -# capath defines a directory that will be searched for files -# containing the CA certificates. For capath to work correctly, the -# certificate files must have ".crt" as the file ending and you must run -# "c_rehash " each time you add/remove a certificate. -#cafile -#capath - -# Path to the PEM encoded server certificate. -#certfile - -# Path to the PEM encoded keyfile. -#keyfile - -# This option defines the version of the TLS protocol to use for this listener. -# The default value will always be the highest version that is available for -# the version of openssl that the broker was compiled against. For openssl >= -# 1.0.1 the valid values are tlsv1.2 tlsv1.1 and tlsv1. For openssl < 1.0.1 the -# valid values are tlsv1. 
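
The listener options above are the broker half of TLS; with the vendored Go client, the client half is a tls.Config handed to the connect options. A sketch assuming the options type exposes SetTLSConfig, as contemporaneous versions of this client did; the broker address and CA file path are placeholders:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"

	MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)

func main() {
	// Trust the CA that signed the broker certificate (cafile on the broker side).
	pem, err := ioutil.ReadFile("ca.crt")
	if err != nil {
		panic(err)
	}
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(pem)

	// 8883 is the recommended MQTT-over-TLS port noted above.
	opts := MQTT.NewClientOptions().AddBroker("ssl://localhost:8883")
	opts.SetTLSConfig(&tls.Config{RootCAs: pool})

	c := MQTT.NewClient(opts)
	if token := c.Connect(); token.Wait() && token.Error() != nil {
		panic(token.Error())
	}
	c.Disconnect(250)
}
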
-#tls_version - -# By default a TLS enabled listener will operate in a similar fashion to a -# https enabled web server, in that the server has a certificate signed by a CA -# and the client will verify that it is a trusted certificate. The overall aim -# is encryption of the network traffic. By setting require_certificate to true, -# the client must provide a valid certificate in order for the network -# connection to proceed. This allows access to the broker to be controlled -# outside of the mechanisms provided by MQTT. -#require_certificate false - -# If require_certificate is true, you may set use_identity_as_username to true -# to use the CN value from the client certificate as a username. If this is -# true, the password_file option will not be used for this listener. -#use_identity_as_username false - -# If you have require_certificate set to true, you can create a certificate -# revocation list file to revoke access to particular client certificates. If -# you have done this, use crlfile to point to the PEM encoded revocation file. -#crlfile - -# If you wish to control which encryption ciphers are used, use the ciphers -# option. The list of available ciphers can be optained using the "openssl -# ciphers" command and should be provided in the same format as the output of -# that command. -# If unset defaults to DEFAULT:!aNULL:!eNULL:!LOW:!EXPORT:!SSLv2:@STRENGTH -#ciphers DEFAULT:!aNULL:!eNULL:!LOW:!EXPORT:!SSLv2:@STRENGTH - -# ----------------------------------------------------------------- -# Pre-shared-key based SSL/TLS support -# ----------------------------------------------------------------- -# The following options can be used to enable PSK based SSL/TLS support for -# this listener. Note that the recommended port for MQTT over TLS is 8883, but -# this must be set manually. -# -# See also the mosquitto-tls man page and the "Certificate based SSL/TLS -# support" section. Only one of certificate or PSK encryption support can be -# enabled for any listener. - -# The psk_hint option enables pre-shared-key support for this listener and also -# acts as an identifier for this listener. The hint is sent to clients and may -# be used locally to aid authentication. The hint is a free form string that -# doesn't have much meaning in itself, so feel free to be creative. -# If this option is provided, see psk_file to define the pre-shared keys to be -# used or create a security plugin to handle them. -#psk_hint - -# Set use_identity_as_username to have the psk identity sent by the client used -# as its username. Authentication will be carried out using the PSK rather than -# the MQTT username/password and so password_file will not be used for this -# listener. -#use_identity_as_username false - -# When using PSK, the encryption ciphers used will be chosen from the list of -# available PSK ciphers. If you want to control which ciphers are available, -# use the "ciphers" option. The list of available ciphers can be optained -# using the "openssl ciphers" command and should be provided in the same format -# as the output of that command. -#ciphers - -# ================================================================= -# Extra listeners -# ================================================================= - -# Listen on a port/ip address combination. By using this variable -# multiple times, mosquitto can listen on more than one port. If -# this variable is used and neither bind_address nor port given, -# then the default listener will not be started. 
-# The port number to listen on must be given. Optionally, an ip -# address or host name may be supplied as a second argument. In -# this case, mosquitto will attempt to bind the listener to that -# address and so restrict access to the associated network and -# interface. By default, mosquitto will listen on all interfaces. -# listener port-number [ip address/host name] -#listener - -# The maximum number of client connections to allow. This is -# a per listener setting. -# Default is -1, which means unlimited connections. -# Note that other process limits mean that unlimited connections -# are not really possible. Typically the default maximum number of -# connections possible is around 1024. -#max_connections -1 - -# The listener can be restricted to operating within a topic hierarchy using -# the mount_point option. This is achieved be prefixing the mount_point string -# to all topics for any clients connected to this listener. This prefixing only -# happens internally to the broker; the client will not see the prefix. -#mount_point - -# ----------------------------------------------------------------- -# Certificate based SSL/TLS support -# ----------------------------------------------------------------- -# The following options can be used to enable certificate based SSL/TLS support -# for this listener. Note that the recommended port for MQTT over TLS is 8883, -# but this must be set manually. -# -# See also the mosquitto-tls man page and the "Pre-shared-key based SSL/TLS -# support" section. Only one of certificate or PSK encryption support can be -# enabled for any listener. - -# At least one of cafile or capath must be defined to enable certificate based -# TLS encryption. They both define methods of accessing the PEM encoded -# Certificate Authority certificates that have signed your server certificate -# and that you wish to trust. -# cafile defines the path to a file containing the CA certificates. -# capath defines a directory that will be searched for files -# containing the CA certificates. For capath to work correctly, the -# certificate files must have ".crt" as the file ending and you must run -# "c_rehash " each time you add/remove a certificate. -#cafile -#capath - -# Path to the PEM encoded server certificate. -#certfile - -# Path to the PEM encoded keyfile. -#keyfile - -# By default an TLS enabled listener will operate in a similar fashion to a -# https enabled web server, in that the server has a certificate signed by a CA -# and the client will verify that it is a trusted certificate. The overall aim -# is encryption of the network traffic. By setting require_certificate to true, -# the client must provide a valid certificate in order for the network -# connection to proceed. This allows access to the broker to be controlled -# outside of the mechanisms provided by MQTT. -#require_certificate false - -# If require_certificate is true, you may set use_identity_as_username to true -# to use the CN value from the client certificate as a username. If this is -# true, the password_file option will not be used for this listener. -#use_identity_as_username false - -# If you have require_certificate set to true, you can create a certificate -# revocation list file to revoke access to particular client certificates. If -# you have done this, use crlfile to point to the PEM encoded revocation file. -#crlfile - -# If you wish to control which encryption ciphers are used, use the ciphers -# option. 
The list of available ciphers can be optained using the "openssl -# ciphers" command and should be provided in the same format as the output of -# that command. -#ciphers - -# ----------------------------------------------------------------- -# Pre-shared-key based SSL/TLS support -# ----------------------------------------------------------------- -# The following options can be used to enable PSK based SSL/TLS support for -# this listener. Note that the recommended port for MQTT over TLS is 8883, but -# this must be set manually. -# -# See also the mosquitto-tls man page and the "Certificate based SSL/TLS -# support" section. Only one of certificate or PSK encryption support can be -# enabled for any listener. - -# The psk_hint option enables pre-shared-key support for this listener and also -# acts as an identifier for this listener. The hint is sent to clients and may -# be used locally to aid authentication. The hint is a free form string that -# doesn't have much meaning in itself, so feel free to be creative. -# If this option is provided, see psk_file to define the pre-shared keys to be -# used or create a security plugin to handle them. -#psk_hint - -# Set use_identity_as_username to have the psk identity sent by the client used -# as its username. Authentication will be carried out using the PSK rather than -# the MQTT username/password and so password_file will not be used for this -# listener. -#use_identity_as_username false - -# When using PSK, the encryption ciphers used will be chosen from the list of -# available PSK ciphers. If you want to control which ciphers are available, -# use the "ciphers" option. The list of available ciphers can be optained -# using the "openssl ciphers" command and should be provided in the same format -# as the output of that command. -#ciphers - -# ================================================================= -# Persistence -# ================================================================= - -# If persistence is enabled, save the in-memory database to disk -# every autosave_interval seconds. If set to 0, the persistence -# database will only be written when mosquitto exits. See also -# autosave_on_changes. -# Note that writing of the persistence database can be forced by -# sending mosquitto a SIGUSR1 signal. -#autosave_interval 1800 - -# If true, mosquitto will count the number of subscription changes, retained -# messages received and queued messages and if the total exceeds -# autosave_interval then the in-memory database will be saved to disk. -# If false, mosquitto will save the in-memory database to disk by treating -# autosave_interval as a time in seconds. -#autosave_on_changes false - -# Save persistent message data to disk (true/false). -# This saves information about all messages, including -# subscriptions, currently in-flight messages and retained -# messages. -# retained_persistence is a synonym for this option. -#persistence false - -# The filename to use for the persistent database, not including -# the path. -#persistence_file mosquitto.db - -# Location for persistent database. Must include trailing / -# Default is an empty string (current directory). -# Set to e.g. /var/lib/mosquitto/ if running as a proper service on Linux or -# similar. -#persistence_location - -# ================================================================= -# Logging -# ================================================================= - -# Places to log to. Use multiple log_dest lines for multiple -# logging destinations. 
-# Possible destinations are: stdout stderr syslog topic file -# -# stdout and stderr log to the console on the named output. -# -# syslog uses the userspace syslog facility which usually ends up -# in /var/log/messages or similar. -# -# topic logs to the broker topic '$SYS/broker/log/', -# where severity is one of D, E, W, N, I, M which are debug, error, -# warning, notice, information and message. Message type severity is used by -# the subscribe/unsubscribe log_types and publishes log messages to -# $SYS/broker/log/M/susbcribe or $SYS/broker/log/M/unsubscribe. -# -# The file destination requires an additional parameter which is the file to be -# logged to, e.g. "log_dest file /var/log/mosquitto.log". The file will be -# closed and reopened when the broker receives a HUP signal. Only a single file -# destination may be configured. -# -# Note that if the broker is running as a Windows service it will default to -# "log_dest none" and neither stdout nor stderr logging is available. -# Use "log_dest none" if you wish to disable logging. -log_dest stdout - -# Types of messages to log. Use multiple log_type lines for logging -# multiple types of messages. -# Possible types are: debug, error, warning, notice, information, -# none, subscribe, unsubscribe, all. -# Note that debug type messages are for decoding the incoming/outgoing -# network packets. They are not logged in "topics". -#log_type error -#log_type warning -#log_type notice -log_type information - -# If set to true, client connection and disconnection messages will be included -# in the log. -#connection_messages true - -# If set to true, add a timestamp value to each log message. -#log_timestamp true - -# ================================================================= -# Security -# ================================================================= - -# If set, only clients that have a matching prefix on their -# clientid will be allowed to connect to the broker. By default, -# all clients may connect. -# For example, setting "secure-" here would mean a client "secure- -# client" could connect but another with clientid "mqtt" couldn't. -#clientid_prefixes - -# Boolean value that determines whether clients that connect -# without providing a username are allowed to connect. If set to -# false then a password file should be created (see the -# password_file option) to control authenticated client access. -# Defaults to true. -#allow_anonymous true - -# In addition to the clientid_prefixes, allow_anonymous and TLS -# authentication options, username based authentication is also -# possible. The default support is described in "Default -# authentication and topic access control" below. The auth_plugin -# allows another authentication method to be used. -# Specify the path to the loadable plugin and see the -# "Authentication and topic access plugin options" section below. -#auth_plugin - -# ----------------------------------------------------------------- -# Default authentication and topic access control -# ----------------------------------------------------------------- - -# Control access to the broker using a password file. This file can be -# generated using the mosquitto_passwd utility. 
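
password_file is enforced at CONNECT time, so on the client side it maps to the username/password connect options. A sketch assuming SetUsername and SetPassword on this client's options type, as in versions of this era; the credentials are placeholders that happen to match the CONNECT packet test earlier in this diff:

package main

import (
	MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)

func main() {
	// Matches a username:password line created with mosquitto_passwd.
	opts := MQTT.NewClientOptions().AddBroker("tcp://localhost:1883")
	opts.SetUsername("testuser")
	opts.SetPassword("testpass")

	c := MQTT.NewClient(opts)
	if token := c.Connect(); token.Wait() && token.Error() != nil {
		// A CONNACK refusal such as ErrRefusedBadUsernameOrPassword (0x04)
		// would surface here as a connect error.
		panic(token.Error())
	}
	c.Disconnect(250)
}
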
If TLS support is not compiled -# into mosquitto (it is recommended that TLS support should be included) then -# plain text passwords are used, in which case the file should be a text file -# with lines in the format: -# username:password -# The password (and colon) may be omitted if desired, although this -# offers very little in the way of security. -# -# See the TLS client require_certificate and use_identity_as_username options -# for alternative authentication options. -password_file pwfile.example - -# Access may also be controlled using a pre-shared-key file. This requires -# TLS-PSK support and a listener configured to use it. The file should be text -# lines in the format: -# identity:key -# The key should be in hexadecimal format without a leading "0x". -#psk_file - -# Control access to topics on the broker using an access control list -# file. If this parameter is defined then only the topics listed will -# have access. -# If the first character of a line of the ACL file is a # it is treated as a -# comment. -# Topic access is added with lines of the format: -# -# topic [read|write] -# -# The access type is controlled using "read" or "write". This parameter -# is optional - if not given then the access is read/write. -# can contain the + or # wildcards as in subscriptions. -# -# The first set of topics are applied to anonymous clients, assuming -# allow_anonymous is true. User specific topic ACLs are added after a -# user line as follows: -# -# user -# -# The username referred to here is the same as in password_file. It is -# not the clientid. -# -# -# If is also possible to define ACLs based on pattern substitution within the -# topic. The patterns available for substition are: -# -# %c to match the client id of the client -# %u to match the username of the client -# -# The substitution pattern must be the only text for that level of hierarchy. -# -# The form is the same as for the topic keyword, but using pattern as the -# keyword. -# Pattern ACLs apply to all users even if the "user" keyword has previously -# been given. -# -# If using bridges with usernames and ACLs, connection messages can be allowed -# with the following pattern: -# pattern write $SYS/broker/connection/%c/state -# -# pattern [read|write] -# -# Example: -# -# pattern write sensor/%u/data -# -#acl_file - -# ----------------------------------------------------------------- -# Authentication and topic access plugin options -# ----------------------------------------------------------------- - -# If the auth_plugin option above is used, define options to pass to the -# plugin here as described by the plugin instructions. All options named -# using the format auth_opt_* will be passed to the plugin, for example: -# -# auth_opt_db_host -# auth_opt_db_port -# auth_opt_db_username -# auth_opt_db_password - - -# ================================================================= -# Bridges -# ================================================================= - -# A bridge is a way of connecting multiple MQTT brokers together. -# Create a new bridge using the "connection" option as described below. Set -# options for the bridges using the remaining parameters. You must specify the -# address and at least one topic to subscribe to. -# Each connection must have a unique name. -# The address line may have multiple host address and ports specified. See -# below in the round_robin description for more details on bridge behaviour if -# multiple addresses are used. 
-# The direction that the topic will be shared can be chosen by -# specifying out, in or both, where the default value is out. -# The QoS level of the bridged communication can be specified with the next -# topic option. The default QoS level is 0; to change the QoS, the topic -# direction must also be given. -# The local and remote prefix options allow a topic to be remapped when it is -# bridged to/from the remote broker. This provides the ability to place a topic -# tree in an appropriate location. -# For more details see the mosquitto.conf man page. -# Multiple topics can be specified per connection, but be careful -# not to create any loops. -# If you are using bridges with cleansession set to false (the default), then -# you may get unexpected behaviour from incoming topics if you change what -# topics you are subscribing to. This is because the remote broker keeps the -# subscription for the old topic. If you have this problem, connect your bridge -# with cleansession set to true, then reconnect with cleansession set to false -# as normal. -#connection <name> -#address <host>[:<port>] [<host>[:<port>]] -#topic <topic>[[[out | in | both] qos-level] local-prefix remote-prefix] - -# If the bridge has more than one address given in the address/addresses -# configuration, the round_robin option defines the behaviour of the bridge on -# a failure of the bridge connection. If round_robin is false, the default -# value, then the first address is treated as the main bridge connection. If -# the connection fails, the other secondary addresses will be attempted in -# turn. Whilst connected to a secondary bridge, the bridge will periodically -# attempt to reconnect to the main bridge until successful. -# If round_robin is true, then all addresses are treated as equals. If a -# connection fails, the next address will be tried and if successful will -# remain connected until it fails. -#round_robin false - -# Set the client id for this bridge connection. If not defined, -# this defaults to 'name.hostname' where name is the connection -# name and hostname is the hostname of this computer. -#clientid - -# Set the clean session variable for this bridge. -# When set to true, when the bridge disconnects for any reason, all -# messages and subscriptions will be cleaned up on the remote -# broker. Note that with cleansession set to true, there may be a -# significant amount of retained messages sent when the bridge -# reconnects after losing its connection. -# When set to false, the subscriptions and messages are kept on the -# remote broker, and delivered when the bridge reconnects. -#cleansession false - -# If set to true, publish notification messages to the local and remote brokers -# giving information about the state of the bridge connection. Retained -# messages are published to the topic $SYS/broker/connection/<name>/state -# unless the notification_topic option is used. -# If the message is 1 then the connection is active, or 0 if the connection has -# failed. -#notifications true - -# Choose the topic on which notification messages for this bridge are -# published. If not set, messages are published on the topic -# $SYS/broker/connection/<name>/state -#notification_topic - -# Set the keepalive interval for this bridge connection, in -# seconds. -#keepalive_interval 60 - -# Set the start type of the bridge. This controls how the bridge starts and -# can be one of three types: automatic, lazy and once. Note that RSMB provides -# a fourth start type "manual" which isn't currently supported by mosquitto.
-# -# "automatic" is the default start type and means that the bridge connection -# will be started automatically when the broker starts and also restarted -# after a short delay (30 seconds) if the connection fails. -# -# Bridges using the "lazy" start type will be started automatically when the -# number of queued messages exceeds the number set with the "threshold" -# parameter. It will be stopped automatically after the time set by the -# "idle_timeout" parameter. Use this start type if you wish the connection to -# only be active when it is needed. -# -# A bridge using the "once" start type will be started automatically when the -# broker starts but will not be restarted if the connection fails. -#start_type automatic - -# Set the amount of time a bridge using the automatic start type will wait -# until attempting to reconnect. Defaults to 30 seconds. -#restart_timeout 30 - -# Set the amount of time a bridge using the lazy start type must be idle before -# it will be stopped. Defaults to 60 seconds. -#idle_timeout 60 - -# Set the number of messages that need to be queued for a bridge with lazy -# start type to be restarted. Defaults to 10 messages. -# Must be less than max_queued_messages. -#threshold 10 - -# If try_private is set to true, the bridge will attempt to indicate to the -# remote broker that it is a bridge not an ordinary client. If successful, this -# means that loop detection will be more effective and that retained messages -# will be propagated correctly. Not all brokers support this feature so it may -# be necessary to set try_private to false if your bridge does not connect -# properly. -#try_private true - -# Set the username to use when connecting to an MQTT v3.1 broker -# that requires authentication. -#username - -# Set the password to use when connecting to an MQTT v3.1 broker -# that requires authentication. This option is only used if -# username is also set. -#password - -# ----------------------------------------------------------------- -# Certificate based SSL/TLS support -# ----------------------------------------------------------------- -# Either bridge_cafile or bridge_capath must be defined to enable TLS support -# for this bridge. -# bridge_cafile defines the path to a file containing the -# Certificate Authority certificates that have signed the remote broker -# certificate. -# bridge_capath defines a directory that will be searched for files containing -# the CA certificates. For bridge_capath to work correctly, the certificate -# files must have ".crt" as the file ending and you must run "c_rehash <path to capath>" each time you add/remove a certificate. -#bridge_cafile -#bridge_capath - -# Path to the PEM encoded client certificate, if required by the remote broker. -#bridge_certfile - -# Path to the PEM encoded client private key, if required by the remote broker. -#bridge_keyfile - -# When using certificate based encryption, bridge_insecure disables -# verification of the server hostname in the server certificate. This can be -# useful when testing initial server configurations, but makes it possible for -# a malicious third party to impersonate your server through DNS spoofing, for -# example. Use this option in testing only. If you need to resort to using this -# option in a production environment, your setup is at fault and there is no -# point using encryption.
-#bridge_insecure false - -# ----------------------------------------------------------------- -# PSK based SSL/TLS support -# ----------------------------------------------------------------- -# Pre-shared-key encryption provides an alternative to certificate based -# encryption. A bridge can be configured to use PSK with the bridge_identity -# and bridge_psk options. These are the client PSK identity, and pre-shared-key -# in hexadecimal format with no "0x". Only one of certificate and PSK based -# encryption can be used on one -# bridge at once. -#bridge_identity -#bridge_psk - - -# ================================================================= -# External config files -# ================================================================= - -# External configuration files may be included by using the -# include_dir option. This defines a directory that will be searched -# for config files. All files that end in '.conf' will be loaded as -# a configuration file. It is best to have this as the last option -# in the main file. This option will only be processed from the main -# configuration file. The directory specified must not contain the -# main configuration file. -#include_dir - -# ================================================================= -# Unsupported rsmb options - for the future -# ================================================================= - -#addresses -#round_robin - -# ================================================================= -# rsmb options - unlikely to ever be supported -# ================================================================= - -#ffdc_output -#max_log_entries -#trace_level -#trace_output diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/pwfile.example b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/pwfile.example deleted file mode 100644 index 58b94c9aa..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/pwfile.example +++ /dev/null @@ -1,3 +0,0 @@ -roger:$6$clQ4Ocu312S0qWgl$Cv2wUxgEN73c6C6jlBkswqR4AkHsvDLWvtEXZZ8NpsBLgP1WAo/qA+WXcmEN/mjDNgdUwcxRAveqNMs2xUVQYA== -sub_client:$6$U+qg0/32F0g2Fh+n$fBPSkq/rfNyEQ/TkEjRgwGTTVBpvNhKSyGShovH9KHewsvJ731tD5Zx26IHhR5RYCICt0L9qBW0/KK31UkCliw== -pub_client:$6$vxQ89y+7WrsnL2yn$fSPMmEZn9TSrC8s/jaPmxJ9NijWpkP2e7bMJLz78JXR1vW2x8+T3FZ23byJA6xs5Mt+LeOybAHwcUv0OCl40rA== diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/routing.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/routing.go deleted file mode 100644 index d95c6b59c..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/routing.go +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -/*---------------------------------------------------------------------- -This sample is designed to demonstrate the ability to set individual -callbacks on a per-subscription basis. 
There are three handlers in use: - brokerLoadHandler - $SYS/broker/load/# - brokerConnectionHandler - $SYS/broker/connection/# - brokerClientHandler - $SYS/broker/clients/# -The client will receive 100 messages total from those subscriptions, -and then print the total number of messages received from each. -It may take a few moments for the sample to complete running, as it -must wait for messages to be published. ------------------------------------------------------------------------*/ - -package main - -import ( - "fmt" - "os" - - MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" -) - -var brokerLoad = make(chan bool) -var brokerConnection = make(chan bool) -var brokerClients = make(chan bool) - -func brokerLoadHandler(client *MQTT.Client, msg MQTT.Message) { - brokerLoad <- true - fmt.Printf("BrokerLoadHandler ") - fmt.Printf("[%s] ", msg.Topic()) - fmt.Printf("%s\n", msg.Payload()) -} - -func brokerConnectionHandler(client *MQTT.Client, msg MQTT.Message) { - brokerConnection <- true - fmt.Printf("BrokerConnectionHandler ") - fmt.Printf("[%s] ", msg.Topic()) - fmt.Printf("%s\n", msg.Payload()) -} - -func brokerClientsHandler(client *MQTT.Client, msg MQTT.Message) { - brokerClients <- true - fmt.Printf("BrokerClientsHandler ") - fmt.Printf("[%s] ", msg.Topic()) - fmt.Printf("%s\n", msg.Payload()) -} - -func main() { - opts := MQTT.NewClientOptions().AddBroker("tcp://iot.eclipse.org:1883").SetClientID("router-sample") - opts.SetCleanSession(true) - - c := MQTT.NewClient(opts) - if token := c.Connect(); token.Wait() && token.Error() != nil { - panic(token.Error()) - } - - if token := c.Subscribe("$SYS/broker/load/#", 0, brokerLoadHandler); token.Wait() && token.Error() != nil { - fmt.Println(token.Error()) - os.Exit(1) - } - - if token := c.Subscribe("$SYS/broker/connection/#", 0, brokerConnectionHandler); token.Wait() && token.Error() != nil { - fmt.Println(token.Error()) - os.Exit(1) - } - - if token := c.Subscribe("$SYS/broker/clients/#", 0, brokerClientsHandler); token.Wait() && token.Error() != nil { - fmt.Println(token.Error()) - os.Exit(1) - } - - loadCount := 0 - connectionCount := 0 - clientsCount := 0 - - for i := 0; i < 100; i++ { - select { - case <-brokerLoad: - loadCount++ - case <-brokerConnection: - connectionCount++ - case <-brokerClients: - clientsCount++ - } - } - - fmt.Printf("Received %3d Broker Load messages\n", loadCount) - fmt.Printf("Received %3d Broker Connection messages\n", connectionCount) - fmt.Printf("Received %3d Broker Clients messages\n", clientsCount) - - c.Disconnect(250) -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sample.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sample.go deleted file mode 100644 index 3f89f8af2..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sample.go +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. 
This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - *    Seth Hoenig - *    Allan Stockdill-Mander - *    Mike Robertson - */ - -package main - -import ( - "flag" - "fmt" - "os" - - MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" -) - -/* -Options: - [-help] Display help - [-a pub|sub] Action pub (publish) or sub (subscribe) - [-m <message>] Payload to send - [-n <number>] Number of messages to send or receive - [-q 0|1|2] Quality of Service - [-clean] CleanSession (true if -clean is present) - [-id <clientid>] ClientID - [-user <user>] User - [-password <password>] Password - [-broker <uri>] Broker URI - [-topic <topic>] Topic - [-store <path>] Store Directory - -*/ - -func main() { - topic := flag.String("topic", "", "The topic name to/from which to publish/subscribe") - broker := flag.String("broker", "tcp://iot.eclipse.org:1883", "The broker URI. ex: tcp://10.10.1.1:1883") - password := flag.String("password", "", "The password (optional)") - user := flag.String("user", "", "The User (optional)") - id := flag.String("id", "testgoid", "The ClientID (optional)") - cleansess := flag.Bool("clean", false, "Set Clean Session (default false)") - qos := flag.Int("qos", 0, "The Quality of Service 0,1,2 (default 0)") - num := flag.Int("num", 1, "The number of messages to publish or subscribe (default 1)") - payload := flag.String("message", "", "The message text to publish (default empty)") - action := flag.String("action", "", "Action publish or subscribe (required)") - store := flag.String("store", ":memory:", "The Store Directory (default use memory store)") - flag.Parse() - - if *action != "pub" && *action != "sub" { - fmt.Println("Invalid setting for -action, must be pub or sub") - return - } - - if *topic == "" { - fmt.Println("Invalid setting for -topic, must not be empty") - return - } - - fmt.Printf("Sample Info:\n") - fmt.Printf("\taction: %s\n", *action) - fmt.Printf("\tbroker: %s\n", *broker) - fmt.Printf("\tclientid: %s\n", *id) - fmt.Printf("\tuser: %s\n", *user) - fmt.Printf("\tpassword: %s\n", *password) - fmt.Printf("\ttopic: %s\n", *topic) - fmt.Printf("\tmessage: %s\n", *payload) - fmt.Printf("\tqos: %d\n", *qos) - fmt.Printf("\tcleansess: %v\n", *cleansess) - fmt.Printf("\tnum: %d\n", *num) - fmt.Printf("\tstore: %s\n", *store) - - opts := MQTT.NewClientOptions() - opts.AddBroker(*broker) - opts.SetClientID(*id) - opts.SetUsername(*user) - opts.SetPassword(*password) - opts.SetCleanSession(*cleansess) - if *store != ":memory:" { - opts.SetStore(MQTT.NewFileStore(*store)) - } - - if *action == "pub" { - client := MQTT.NewClient(opts) - if token := client.Connect(); token.Wait() && token.Error() != nil { - panic(token.Error()) - } - fmt.Println("Sample Publisher Started") - for i := 0; i < *num; i++ { - fmt.Println("---- doing publish ----") - token := client.Publish(*topic, byte(*qos), false, *payload) - token.Wait() - } - - client.Disconnect(250) - fmt.Println("Sample Publisher Disconnected") - } else { - receiveCount := 0 - choke := make(chan [2]string) - - opts.SetDefaultPublishHandler(func(client *MQTT.Client, msg MQTT.Message) { - choke <- [2]string{msg.Topic(), string(msg.Payload())} - }) - - client := MQTT.NewClient(opts) - if token := client.Connect(); token.Wait() && token.Error() != nil { - panic(token.Error()) - } - - if token := client.Subscribe(*topic, byte(*qos), nil); token.Wait() && token.Error() != nil { -
fmt.Println(token.Error()) - os.Exit(1) - } - - for receiveCount < *num { - incoming := <-choke - fmt.Printf("RECEIVED TOPIC: %s MESSAGE: %s\n", incoming[0], incoming[1]) - receiveCount++ - } - - client.Disconnect(250) - fmt.Println("Sample Subscriber Disconnected") - } -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/CAfile.pem b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/CAfile.pem deleted file mode 100644 index 16c664a43..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/CAfile.pem +++ /dev/null @@ -1,150 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 1 (0x1) - Signature Algorithm: sha1WithRSAEncryption - Issuer: C=US, ST=Dummy, L=Dummy, O=Dummy, OU=Dummy, CN=Dummy CA - Validity - Not Before: Oct 21 19:24:23 2013 GMT - Not After : Sep 25 19:24:23 2018 GMT - Subject: C=US, ST=Dummy, L=Dummy, O=Dummy, OU=Dummy, CN=Dummy CA - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Public-Key: (2048 bit) - Modulus: - 00:c2:d1:d0:31:dc:93:c3:ad:88:0d:f8:93:fe:cc: - aa:04:1d:85:aa:c3:bb:bd:87:04:f0:42:67:14:34: - 4a:56:94:2b:bf:d0:6b:72:30:38:39:35:20:8c:e3: - 7e:65:82:b0:7e:3e:1d:f1:18:82:b7:d6:19:59:43: - ed:81:be:eb:51:44:fc:77:9e:37:ad:e1:a0:18:b9: - 4b:59:79:90:81:a4:e4:52:2f:fc:e2:ff:98:10:5e: - d5:13:9a:16:62:1a:e0:cb:ab:1d:ae:da:d1:40:d4: - 97:b1:e6:e3:f1:97:2c:2a:52:73:ab:d0:a2:15:f3: - 1e:9a:b0:67:d0:62:67:4b:74:b0:bb:8f:ef:9e:32: - 6a:4c:27:4e:82:7c:16:66:ce:06:e9:a3:d9:36:4f: - f4:3e:bc:80:00:93:c1:ca:31:cf:03:68:d4:e5:8b: - 38:45:b6:1b:35:b0:c0:e9:4a:62:75:83:01:aa:b9: - c1:0b:c0:ee:97:c0:73:23:cd:34:ec:bb:3c:95:35: - c8:2d:69:ff:86:d8:1f:c8:04:7e:18:de:62:c2:4b: - 37:c6:aa:8e:03:bf:2b:0d:97:20:2a:75:47:ec:98: - 29:3c:64:52:ef:91:8b:63:0f:6a:f8:c2:9d:08:6a: - 61:68:6f:64:9a:56:b2:0a:bc:7b:59:3d:7f:fd:ba: - 12:4b - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Subject Key Identifier: - 5B:BB:3E:8E:2D:90:AD:AE:58:07:FF:53:00:18:98:FF:44:84:4C:BA - X509v3 Authority Key Identifier: - keyid:5B:BB:3E:8E:2D:90:AD:AE:58:07:FF:53:00:18:98:FF:44:84:4C:BA - - X509v3 Basic Constraints: - CA:TRUE - Signature Algorithm: sha1WithRSAEncryption - 3c:89:0b:bd:49:10:a6:1a:f6:2a:4b:5f:02:3d:ee:f3:19:4f: - c9:10:79:9c:01:ef:88:22:3d:03:5b:1a:14:46:b6:7f:9b:af: - a5:99:1a:d4:d4:9b:d6:6f:c1:fe:96:8f:9a:9e:47:42:b4:ee: - 21:56:6a:c4:92:38:6c:81:cd:8e:31:43:86:7c:97:15:90:80: - d8:21:f0:46:be:2a:2f:f2:96:07:85:74:a8:fa:1b:78:8f:80: - c1:5e:bc:d9:06:c2:33:9e:8e:f9:08:dd:43:7b:6f:5a:22:67: - 46:78:5d:fb:4a:4e:c2:c6:29:94:17:53:a6:c5:a9:d6:67:06: - 4f:07:ef:da:5b:45:21:83:cb:31:b2:dc:dc:ac:13:19:98:3f: - 98:5f:2c:b4:b4:da:d4:43:d7:a9:1a:6e:b6:cf:be:85:a8:80: - 1f:8a:c1:95:8a:83:a4:af:d2:23:4a:b6:18:87:4e:28:31:36: - 03:2c:bf:e4:9e:b6:75:fd:c4:68:ed:4d:d5:a8:fa:a5:81:13: - 17:1c:43:67:02:1c:d0:e6:00:6e:8b:13:e6:60:1f:ba:40:78: - 93:25:ca:59:5a:71:cc:58:d4:52:63:1d:b3:3c:ce:37:f1:89: - 78:fc:13:fa:b3:ea:22:af:17:68:8a:a1:59:57:f5:1a:49:6e: - b9:f6:5f:b3 ------BEGIN CERTIFICATE----- -MIIDizCCAnOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJVUzEO -MAwGA1UECAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEO -MAwGA1UECwwFRHVtbXkxETAPBgNVBAMMCER1bW15IENBMB4XDTEzMTAyMTE5MjQy -M1oXDTE4MDkyNTE5MjQyM1owYDELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBUR1bW15 -MQ4wDAYDVQQHDAVEdW1teTEOMAwGA1UECgwFRHVtbXkxDjAMBgNVBAsMBUR1bW15 -MREwDwYDVQQDDAhEdW1teSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC 
-ggEBAMLR0DHck8OtiA34k/7MqgQdharDu72HBPBCZxQ0SlaUK7/Qa3IwODk1IIzj -fmWCsH4+HfEYgrfWGVlD7YG+61FE/HeeN63hoBi5S1l5kIGk5FIv/OL/mBBe1ROa -FmIa4MurHa7a0UDUl7Hm4/GXLCpSc6vQohXzHpqwZ9BiZ0t0sLuP754yakwnToJ8 -FmbOBumj2TZP9D68gACTwcoxzwNo1OWLOEW2GzWwwOlKYnWDAaq5wQvA7pfAcyPN -NOy7PJU1yC1p/4bYH8gEfhjeYsJLN8aqjgO/Kw2XICp1R+yYKTxkUu+Ri2MPavjC -nQhqYWhvZJpWsgq8e1k9f/26EksCAwEAAaNQME4wHQYDVR0OBBYEFFu7Po4tkK2u -WAf/UwAYmP9EhEy6MB8GA1UdIwQYMBaAFFu7Po4tkK2uWAf/UwAYmP9EhEy6MAwG -A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADyJC71JEKYa9ipLXwI97vMZ -T8kQeZwB74giPQNbGhRGtn+br6WZGtTUm9Zvwf6Wj5qeR0K07iFWasSSOGyBzY4x -Q4Z8lxWQgNgh8Ea+Ki/ylgeFdKj6G3iPgMFevNkGwjOejvkI3UN7b1oiZ0Z4XftK -TsLGKZQXU6bFqdZnBk8H79pbRSGDyzGy3NysExmYP5hfLLS02tRD16kabrbPvoWo -gB+KwZWKg6Sv0iNKthiHTigxNgMsv+SetnX9xGjtTdWo+qWBExccQ2cCHNDmAG6L -E+ZgH7pAeJMlyllaccxY1FJjHbM8zjfxiXj8E/qz6iKvF2iKoVlX9RpJbrn2X7M= ------END CERTIFICATE----- -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 1 (0x1) - Signature Algorithm: sha1WithRSAEncryption - Issuer: C=US, ST=Dummy, L=Dummy, O=Dummy, OU=Dummy, CN=Dummy CA - Validity - Not Before: Oct 21 19:24:23 2013 GMT - Not After : Sep 25 19:24:23 2018 GMT - Subject: C=US, ST=Dummy, L=Dummy, O=Dummy, OU=Dummy, CN=Dummy Intermediate CA - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Public-Key: (2048 bit) - Modulus: - 00:cf:7d:92:07:a5:56:1b:6f:4c:f3:34:c2:12:c2: - 34:62:3b:69:aa:a6:0c:c6:70:5b:93:bc:dc:41:98: - 61:87:61:36:be:8c:08:dd:31:a9:33:76:d3:66:3e: - 77:60:1e:ed:9e:e1:e5:ef:bf:17:91:ac:0c:63:07: - 01:ab:30:67:bc:16:a6:2f:79:f0:61:8c:79:2d:3c: - 98:60:74:61:c4:5f:60:44:85:71:92:9d:cc:7b:14: - 39:74:aa:44:f9:9f:ae:f6:c7:8d:c3:01:47:53:24: - ac:7b:a2:f6:c5:7d:65:37:40:0b:20:c8:d4:14:cd: - f8:f4:57:ea:23:70:f4:e3:99:2b:1c:9a:67:37:ed: - 93:c7:a7:7c:86:90:f7:ae:fc:6f:4b:18:dc:d5:eb: - f3:68:33:d6:78:14:d1:ca:a7:06:7d:75:34:f6:c0: - d4:15:1b:21:2b:78:d9:76:24:a5:f0:c6:13:c8:1e: - 4a:c8:ca:77:34:4e:f8:fa:49:5f:6c:e1:66:a8:65: - f0:8c:bc:44:20:03:ac:af:4a:61:a5:39:48:51:1b: - cb:d8:22:29:60:27:47:42:fc:bf:6a:77:65:58:09: - 20:82:1c:d1:16:5e:5a:18:ea:99:61:8e:93:94:27: - 30:20:dd:44:03:50:43:b4:ec:a3:0f:ee:91:69:d7: - b1:5b - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Basic Constraints: - CA:TRUE - Signature Algorithm: sha1WithRSAEncryption - 39:a0:8d:2f:68:22:1d:4f:3e:db:f1:9b:29:20:77:23:f8:21: - 34:17:84:00:88:a8:3e:a1:4d:84:94:90:96:02:e6:6a:b4:20: - 51:a0:66:20:38:05:18:aa:2a:3e:9a:50:60:af:eb:4a:70:ac: - 9b:59:30:d5:17:14:9c:b4:91:6a:1b:c3:45:8a:dd:cd:2f:c6: - c5:8c:fe:d0:76:20:63:a4:97:db:e3:2a:8e:c1:3d:c8:b6:06: - 2d:49:7a:d9:8a:de:16:ea:5d:5f:fb:41:79:0d:8f:d2:23:00: - d9:b9:6f:93:45:bb:74:17:ea:6b:72:13:01:86:fe:8d:7e:8f: - 27:71:76:a9:37:6d:6c:90:5a:3f:d9:6d:4d:6c:a4:64:7a:ea: - 82:c9:87:ee:6a:d0:6e:30:05:7f:19:1d:19:31:a9:9a:ce:21: - 84:da:47:c7:a0:66:12:e8:7e:57:69:5d:9c:24:e5:46:3c:bf: - 37:f6:88:c3:b1:42:de:3b:81:ed:f5:ae:e2:23:9e:c2:89:a1: - e7:5c:1d:49:0f:ed:ae:55:60:0e:4e:4c:e9:8a:64:e6:ae:c5: - d1:99:a7:70:4c:7e:5d:53:ac:88:2c:0f:0b:21:94:1a:32:f9: - a1:cc:1e:67:98:6b:b6:e9:b1:b9:4b:46:02:b1:65:c9:49:83: - 80:bd:b9:70 ------BEGIN CERTIFICATE----- -MIIDWDCCAkCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJVUzEO -MAwGA1UECAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEO -MAwGA1UECwwFRHVtbXkxETAPBgNVBAMMCER1bW15IENBMB4XDTEzMTAyMTE5MjQy -M1oXDTE4MDkyNTE5MjQyM1owbTELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBUR1bW15 -MQ4wDAYDVQQHDAVEdW1teTEOMAwGA1UECgwFRHVtbXkxDjAMBgNVBAsMBUR1bW15 -MR4wHAYDVQQDDBVEdW1teSBJbnRlcm1lZGlhdGUgQ0EwggEiMA0GCSqGSIb3DQEB 
-AQUAA4IBDwAwggEKAoIBAQDPfZIHpVYbb0zzNMISwjRiO2mqpgzGcFuTvNxBmGGH -YTa+jAjdMakzdtNmPndgHu2e4eXvvxeRrAxjBwGrMGe8FqYvefBhjHktPJhgdGHE -X2BEhXGSncx7FDl0qkT5n672x43DAUdTJKx7ovbFfWU3QAsgyNQUzfj0V+ojcPTj -mSscmmc37ZPHp3yGkPeu/G9LGNzV6/NoM9Z4FNHKpwZ9dTT2wNQVGyEreNl2JKXw -xhPIHkrIync0Tvj6SV9s4WaoZfCMvEQgA6yvSmGlOUhRG8vYIilgJ0dC/L9qd2VY -CSCCHNEWXloY6plhjpOUJzAg3UQDUEO07KMP7pFp17FbAgMBAAGjEDAOMAwGA1Ud -EwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADmgjS9oIh1PPtvxmykgdyP4ITQX -hACIqD6hTYSUkJYC5mq0IFGgZiA4BRiqKj6aUGCv60pwrJtZMNUXFJy0kWobw0WK -3c0vxsWM/tB2IGOkl9vjKo7BPci2Bi1JetmK3hbqXV/7QXkNj9IjANm5b5NFu3QX -6mtyEwGG/o1+jydxdqk3bWyQWj/ZbU1spGR66oLJh+5q0G4wBX8ZHRkxqZrOIYTa -R8egZhLofldpXZwk5UY8vzf2iMOxQt47ge31ruIjnsKJoedcHUkP7a5VYA5OTOmK -ZOauxdGZp3BMfl1TrIgsDwshlBoy+aHMHmeYa7bpsblLRgKxZclJg4C9uXA= ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/README b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/README deleted file mode 100644 index aa7c97d70..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/README +++ /dev/null @@ -1,9 +0,0 @@ -Certificate structure: - -Root CA - | - |-> Intermediate CA - | - |-> Server - | - |-> Client diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/client-crt.pem b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/client-crt.pem deleted file mode 100644 index 5069e08e8..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/client-crt.pem +++ /dev/null @@ -1,20 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDRzCCAi8CAQIwDQYJKoZIhvcNAQEFBQAwbTELMAkGA1UEBhMCVVMxDjAMBgNV -BAgMBUR1bW15MQ4wDAYDVQQHDAVEdW1teTEOMAwGA1UECgwFRHVtbXkxDjAMBgNV -BAsMBUR1bW15MR4wHAYDVQQDDBVEdW1teSBJbnRlcm1lZGlhdGUgQ0EwHhcNMTMx -MDIxMTkyNDIzWhcNMTgwOTI1MTkyNDIzWjBmMQswCQYDVQQGEwJVUzEOMAwGA1UE -CAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEOMAwGA1UE -CwwFRHVtbXkxFzAVBgNVBAMMDkR1bW15IChjbGllbnQpMIIBIjANBgkqhkiG9w0B -AQEFAAOCAQ8AMIIBCgKCAQEA4J/+eKqsjK0QS+cSDa5Fh4XM4THy812JkWMySA5r -bFxHZ5ye36/IuwyRQ0Yn2DvhhsDR5K7yz8K+Yfp9A6WRBkyHK/sy/8vurQHeDH3y -lLtHCLi5nfyt2fDxWOYwFrS1giGn2IxJIHBAWu4cBODCkqwqAp92+Lqp3Sn+D+Fb -maHEU3LHua8OIJiSeAIHo/jPqfHFqZxK1bXhGCSQKvUZCaTftsqDtn+LZSElqj1y -5/cnc7XGsTf8ml/+FDMX1aSAHf+pu+UAp9JqOXOM60A5JIpYu3Lsejp1RppyPJYP -zC4nSN8R2LOdDChP2MB7f1/sXRGlLM/X3Vi4X+c6xQ85TQIDAQABMA0GCSqGSIb3 -DQEBBQUAA4IBAQAMWt9qMUOY5z1uyYcjUnconPHLM9MADCZI2sRbfdBOBHEnTVKv -Y63SWnCt8TRJb01LKLIEys6pW1NUlxr6b+FwicNmycR0L8b63cmNXg2NmSZsnK9C -fGT6BbbDdVPYjvmghpSd3soBGBLPsJvaFc6UL5tunm+hT7PxWjDxHZEiE18PTs05 -Vpp/ytILzhoXvJeFOWQHIdf4DLR5izGMNTKdQzgg1eBq2vKgjJIlEZ3j/AyHkJLE -qFip1tyc0PRzgKYFLWttaZzakCLJOGuxtvYB+GrixVM7U23p5LQbLE0KX7fe2Gql -xKMfSID5NUDNf1SuSrrGLD3gfnJEKVB8TVBk ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/client-key.pem b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/client-key.pem deleted file mode 100644 index 7665fb655..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/client-key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- 
-MIIEpAIBAAKCAQEA4J/+eKqsjK0QS+cSDa5Fh4XM4THy812JkWMySA5rbFxHZ5ye -36/IuwyRQ0Yn2DvhhsDR5K7yz8K+Yfp9A6WRBkyHK/sy/8vurQHeDH3ylLtHCLi5 -nfyt2fDxWOYwFrS1giGn2IxJIHBAWu4cBODCkqwqAp92+Lqp3Sn+D+FbmaHEU3LH -ua8OIJiSeAIHo/jPqfHFqZxK1bXhGCSQKvUZCaTftsqDtn+LZSElqj1y5/cnc7XG -sTf8ml/+FDMX1aSAHf+pu+UAp9JqOXOM60A5JIpYu3Lsejp1RppyPJYPzC4nSN8R -2LOdDChP2MB7f1/sXRGlLM/X3Vi4X+c6xQ85TQIDAQABAoIBABosCiZdHIW3lHKD -leLqL0e/G0QR4dDhUSoTeMRUiceyaM91vD0r6iOBL1u7TOEw+PIOfWY7zCbQ9gXM -fcxy+hbVy9ogBq0vQbv+v7SM6DrUJ06o11fFHSyLmlNVXr0GiS+EZF4i2lJhQd5W -aAVZetJEJRDxK5eHiEswnV2UUGvx6VCpFILL0JVGxWY7oOPxiiBLl+cmfRZdTfGx -46VzQvBu7N8hGpCIsljuVFP/DxR7c+2oyrtFaFSMZBMNI8fICgkb2QeLk/XUBXtn -0bDttgmOP/BvnNAor7nIRoeer/7kbXc9jOsgXwnvDKPapltQddL+exycXzbIjLuY -Z2SFsDECgYEA+2A4QGV0biqdICAoKCHCHCU/CrdDUQiQDHqRU6/nhka7MFPSl4Wy -9oISRrYZhKIbSbaXwTW5ZcYq8Hpn/yGYIWlINP9sjprnOWPE7L74lac+PFWXNMUI -jNJOJkLK1IeppByXAt5ekGBrG556bhzRCJsTjYsyUR/r/bMEF1FD8WMCgYEA5MHM -hqmkDK5CbklVaPonNc251Lx+HSzzQ40WExC/PrCczRaZMKlhmyKZfWJCInQsUDln -w6Lqa5UnwZV2HYAF30VZYQsq84ulNnx1/36BEZyIimfAL1WHvKeGWjGsZqniXxxb -Os5wEMAvxk0SWVrR5v6YpBDv3t9+lLg/bzBOAY8CgYEAuZ0q7CH9/vroWrhj7n4+ -3pmCG1+HDWbNNumqNalFxBimT+EVN1058FvLMvtzjERG8f8pvzj0VPom6rr336Pm -uYUMFFYmyoYHBpFs74Nz+s0rX1Gz/PsgfRstKYNYUeZ6lPunZi7clK8dZ591t6j/ -kOMxZOrLlKuFjieJdc5D5RECgYAVTzxXOwxOJhmIHoq3Sb5HU8/A0oJJA3vxyf3J -buDx3Q/uRvGkR9MQ2YtE09dnUD0kiARzhASkWvOmI98p5lglsVcfJCQvJc4RIkz3 -rPgnBNbvVbTgc+4+E7j/Q+tUcPTmeUTCWKK13MFWjq1r53rwMr1TY0SFFXq8LeGy -4OQTXwKBgQDCuPN3Q+EJusYy7TXt0WicY/xyu15s1216N7PmRKFr/WAn2JdAfjbD -JKDwVqo0AQiEDAobJk0JMPs+ENK2d58GsybCK4QGAh6z5FGunb5T432YfnoXtL3J -ZKVvkf7eowvokTIeiDf3XrCPajLDBpo88Xax+RH03US7XRdu/fVzMA== ------END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/intermediateCA-crt.pem b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/intermediateCA-crt.pem deleted file mode 100644 index 6b2658ae2..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/intermediateCA-crt.pem +++ /dev/null @@ -1,20 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDWDCCAkCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJVUzEO -MAwGA1UECAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEO -MAwGA1UECwwFRHVtbXkxETAPBgNVBAMMCER1bW15IENBMB4XDTEzMTAyMTE5MjQy -M1oXDTE4MDkyNTE5MjQyM1owbTELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBUR1bW15 -MQ4wDAYDVQQHDAVEdW1teTEOMAwGA1UECgwFRHVtbXkxDjAMBgNVBAsMBUR1bW15 -MR4wHAYDVQQDDBVEdW1teSBJbnRlcm1lZGlhdGUgQ0EwggEiMA0GCSqGSIb3DQEB -AQUAA4IBDwAwggEKAoIBAQDPfZIHpVYbb0zzNMISwjRiO2mqpgzGcFuTvNxBmGGH -YTa+jAjdMakzdtNmPndgHu2e4eXvvxeRrAxjBwGrMGe8FqYvefBhjHktPJhgdGHE -X2BEhXGSncx7FDl0qkT5n672x43DAUdTJKx7ovbFfWU3QAsgyNQUzfj0V+ojcPTj -mSscmmc37ZPHp3yGkPeu/G9LGNzV6/NoM9Z4FNHKpwZ9dTT2wNQVGyEreNl2JKXw -xhPIHkrIync0Tvj6SV9s4WaoZfCMvEQgA6yvSmGlOUhRG8vYIilgJ0dC/L9qd2VY -CSCCHNEWXloY6plhjpOUJzAg3UQDUEO07KMP7pFp17FbAgMBAAGjEDAOMAwGA1Ud -EwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADmgjS9oIh1PPtvxmykgdyP4ITQX -hACIqD6hTYSUkJYC5mq0IFGgZiA4BRiqKj6aUGCv60pwrJtZMNUXFJy0kWobw0WK -3c0vxsWM/tB2IGOkl9vjKo7BPci2Bi1JetmK3hbqXV/7QXkNj9IjANm5b5NFu3QX -6mtyEwGG/o1+jydxdqk3bWyQWj/ZbU1spGR66oLJh+5q0G4wBX8ZHRkxqZrOIYTa -R8egZhLofldpXZwk5UY8vzf2iMOxQt47ge31ruIjnsKJoedcHUkP7a5VYA5OTOmK -ZOauxdGZp3BMfl1TrIgsDwshlBoy+aHMHmeYa7bpsblLRgKxZclJg4C9uXA= ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/intermediateCA-key.pem 
b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/intermediateCA-key.pem deleted file mode 100644 index 747736097..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/intermediateCA-key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEAz32SB6VWG29M8zTCEsI0YjtpqqYMxnBbk7zcQZhhh2E2vowI -3TGpM3bTZj53YB7tnuHl778XkawMYwcBqzBnvBamL3nwYYx5LTyYYHRhxF9gRIVx -kp3MexQ5dKpE+Z+u9seNwwFHUySse6L2xX1lN0ALIMjUFM349FfqI3D045krHJpn -N+2Tx6d8hpD3rvxvSxjc1evzaDPWeBTRyqcGfXU09sDUFRshK3jZdiSl8MYTyB5K -yMp3NE74+klfbOFmqGXwjLxEIAOsr0phpTlIURvL2CIpYCdHQvy/andlWAkgghzR -Fl5aGOqZYY6TlCcwIN1EA1BDtOyjD+6RadexWwIDAQABAoIBAEs6OsS85DBENUEE -QszsTnPDGLd/Rqh3uiwhUDYUGmAsFd4WBWy1AaSgE1tBkKRv8jUlr+kxfkkZeNA6 -jRdVEHc4Ov6Blm63sIN/Mbve1keNUOjm/NtsjOOe3In45dMfWx8sELC/+O0jIcod -tpy5rwXOGXrEdWgpmXZ1nXVGEfOmQH3eGEPkqbY1I4YlAoXD0mc5fNQQrn7qrogH -M5USCnC44yIIF0Yube2Fg0Cem41vzIvENAlZC273gyW+pQwez0uma2LaCWmkEz1N -sESrNSQ4yeQnDQYlgX2w3RRpqql4GDzAdISL2WJcNhW6KJ72B0SQ1ny/TmQgZePG -Ojv1T0ECgYEA9CXqKyXBSPF+Wdc/fNagrIi6tcNkLAN2/p5J3Z6TtbZGjItoMlDX -c+hwHobcI3GZLMlxlBx7ePc7cKgaMDXrl8BZZjFoyEV9OHOLicfNkLFmBIZ14gtX -bGZYDuCcal46r7IKRjT8lcYWCoLJnI9vLEII7Q7P/eBgcntw3+h/ziECgYEA2ZAa -bp9d0xBaOXq/E341guxNG49R09/DeZ/2CEM+V1pMD8OVH9cvxrBdDLUmAnrqeGTh -Djoi1UEbOVAV6/dXbTQHrla+HF4Uq+t9tV+mt68TEa54PQ/ERt5ih3nZGBiqZ6rX -SGeyZmIXMLIZEs2dIbJ2DmLcZj6Tjxkd/PxPt/sCgYBGczZaEv/uK3k5NWplfI1K -m/28e1BJfwp0OHq6D4sx8RH0djmv4zH4iUbpGCMnuxznFo3Gnl1mr3igbnF4HecI -mAF0AqfoulyC0JygOl5v9TCp957Ghl1Is1OPn3KjIuOuVSKv1ZRZJ5qul8TTf3Qm -AjwPI6oS6Q8LmeEdSzqt4QKBgB5MglHboe5t/ZK5tHibgApOrGJlMEkohYmfrFz0 -OG9j5OnhHBiGGGI8V4kYhUWdJqBDtFAN6qH2Yjs2Gwd0t9k+gL9X1zwOIiTbM/OZ -cZdtK2Ov/5DJbFVOTTx+zKwda0Xqtfagcmjtyjr+4p0Kw5JYzzYrsHQQzO4F2nZM -ETIXAoGADskTzhgpPrC5/qfuLY4gBUtCfYIb8kaKN90AT8A/14lBrT4lSnmsEvKP -tRDmFjnc/ogDlHa5SRDijtT6UoyQPuauAt6DYrJ8G6qKJqiMwJcuLV1XFks7z1J8 -VzB8kso1pPAtcvVXBPklsjvZ10NdQOCqm4N3EVp69agbB1oco4I= ------END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/mosquitto.org.crt b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/mosquitto.org.crt deleted file mode 100644 index b8535e887..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/mosquitto.org.crt +++ /dev/null @@ -1,18 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIC8DCCAlmgAwIBAgIJAOD63PlXjJi8MA0GCSqGSIb3DQEBBQUAMIGQMQswCQYD -VQQGEwJHQjEXMBUGA1UECAwOVW5pdGVkIEtpbmdkb20xDjAMBgNVBAcMBURlcmJ5 -MRIwEAYDVQQKDAlNb3NxdWl0dG8xCzAJBgNVBAsMAkNBMRYwFAYDVQQDDA1tb3Nx -dWl0dG8ub3JnMR8wHQYJKoZIhvcNAQkBFhByb2dlckBhdGNob28ub3JnMB4XDTEy -MDYyOTIyMTE1OVoXDTIyMDYyNzIyMTE1OVowgZAxCzAJBgNVBAYTAkdCMRcwFQYD -VQQIDA5Vbml0ZWQgS2luZ2RvbTEOMAwGA1UEBwwFRGVyYnkxEjAQBgNVBAoMCU1v -c3F1aXR0bzELMAkGA1UECwwCQ0ExFjAUBgNVBAMMDW1vc3F1aXR0by5vcmcxHzAd -BgkqhkiG9w0BCQEWEHJvZ2VyQGF0Y2hvby5vcmcwgZ8wDQYJKoZIhvcNAQEBBQAD -gY0AMIGJAoGBAMYkLmX7SqOT/jJCZoQ1NWdCrr/pq47m3xxyXcI+FLEmwbE3R9vM -rE6sRbP2S89pfrCt7iuITXPKycpUcIU0mtcT1OqxGBV2lb6RaOT2gC5pxyGaFJ+h -A+GIbdYKO3JprPxSBoRponZJvDGEZuM3N7p3S/lRoi7G5wG5mvUmaE5RAgMBAAGj -UDBOMB0GA1UdDgQWBBTad2QneVztIPQzRRGj6ZHKqJTv5jAfBgNVHSMEGDAWgBTa -d2QneVztIPQzRRGj6ZHKqJTv5jAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUA -A4GBAAqw1rK4NlRUCUBLhEFUQasjP7xfFqlVbE2cRy0Rs4o3KS0JwzQVBwG85xge -REyPOFdGdhBY2P1FNRy0MDr6xr+D2ZOwxs63dG1nnAnWZg7qwoLgpZ4fESPD3PkA 
-1ZgKJc2zbSQ9fCPxt2W3mdVav66c6fsb7els2W2Iz7gERJSX ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/rootCA-crt.pem b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/rootCA-crt.pem deleted file mode 100644 index 1ddb0d494..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/rootCA-crt.pem +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDizCCAnOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJVUzEO -MAwGA1UECAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEO -MAwGA1UECwwFRHVtbXkxETAPBgNVBAMMCER1bW15IENBMB4XDTEzMTAyMTE5MjQy -M1oXDTE4MDkyNTE5MjQyM1owYDELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBUR1bW15 -MQ4wDAYDVQQHDAVEdW1teTEOMAwGA1UECgwFRHVtbXkxDjAMBgNVBAsMBUR1bW15 -MREwDwYDVQQDDAhEdW1teSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC -ggEBAMLR0DHck8OtiA34k/7MqgQdharDu72HBPBCZxQ0SlaUK7/Qa3IwODk1IIzj -fmWCsH4+HfEYgrfWGVlD7YG+61FE/HeeN63hoBi5S1l5kIGk5FIv/OL/mBBe1ROa -FmIa4MurHa7a0UDUl7Hm4/GXLCpSc6vQohXzHpqwZ9BiZ0t0sLuP754yakwnToJ8 -FmbOBumj2TZP9D68gACTwcoxzwNo1OWLOEW2GzWwwOlKYnWDAaq5wQvA7pfAcyPN -NOy7PJU1yC1p/4bYH8gEfhjeYsJLN8aqjgO/Kw2XICp1R+yYKTxkUu+Ri2MPavjC -nQhqYWhvZJpWsgq8e1k9f/26EksCAwEAAaNQME4wHQYDVR0OBBYEFFu7Po4tkK2u -WAf/UwAYmP9EhEy6MB8GA1UdIwQYMBaAFFu7Po4tkK2uWAf/UwAYmP9EhEy6MAwG -A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADyJC71JEKYa9ipLXwI97vMZ -T8kQeZwB74giPQNbGhRGtn+br6WZGtTUm9Zvwf6Wj5qeR0K07iFWasSSOGyBzY4x -Q4Z8lxWQgNgh8Ea+Ki/ylgeFdKj6G3iPgMFevNkGwjOejvkI3UN7b1oiZ0Z4XftK -TsLGKZQXU6bFqdZnBk8H79pbRSGDyzGy3NysExmYP5hfLLS02tRD16kabrbPvoWo -gB+KwZWKg6Sv0iNKthiHTigxNgMsv+SetnX9xGjtTdWo+qWBExccQ2cCHNDmAG6L -E+ZgH7pAeJMlyllaccxY1FJjHbM8zjfxiXj8E/qz6iKvF2iKoVlX9RpJbrn2X7M= ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/rootCA-key.pem b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/rootCA-key.pem deleted file mode 100644 index 278287687..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/rootCA-key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAwtHQMdyTw62IDfiT/syqBB2FqsO7vYcE8EJnFDRKVpQrv9Br -cjA4OTUgjON+ZYKwfj4d8RiCt9YZWUPtgb7rUUT8d543reGgGLlLWXmQgaTkUi/8 -4v+YEF7VE5oWYhrgy6sdrtrRQNSXsebj8ZcsKlJzq9CiFfMemrBn0GJnS3Swu4/v -njJqTCdOgnwWZs4G6aPZNk/0PryAAJPByjHPA2jU5Ys4RbYbNbDA6UpidYMBqrnB -C8Dul8BzI8007Ls8lTXILWn/htgfyAR+GN5iwks3xqqOA78rDZcgKnVH7JgpPGRS -75GLYw9q+MKdCGphaG9kmlayCrx7WT1//boSSwIDAQABAoIBAGphOzge5Cjzdtl6 -JQX7J9M7c6O9YaSqN44iFDs6GmWQXxtMaX9eyTSjx/RmvLwdUtZ8gMkHw0kzBYBy -0RwJ7mDgNKP0px6xl0Qo2fYvpTLFoU8nmQUy4AwAXIVpnFNRrfJIq9qw7ZZi/7pL -A6kGDT3G7Bajw/4MVWfOb8GgGhte1ZhZgXFEZNjGkhwi3Na1/6slOQIfnkkhco0X -ru1Cw82nXNPHqu6K+pbHP9ucYdUNZWRh+yQS3p92lr5tB3/IL/lD0Cl3+xP8JFl+ -5NMSISOKGb3ld0rzrJd1ncgLgv/XlHu8DqvcFs9QwXbaUlG0U/0GrorGYqFaZYaH -R1rkZjECgYEA9mAarVAeL7IOeEIg28f/qyp//5+pMzRpVhnI+xscHB5QUO9WH+uE -nOXwcGvcRME134H4o/0j75aMhVs7sGfMOQ+enAwOxRC5h4MCClDSWysWftU8Ihhf -Sm6eZ0kYLZNqXt/TxTs124NiF1Bb5pekzEr9fTj//vP4meuAQ/D0JoUCgYEAym4f -BCm5tLwYYxZM4tko0g9BHxy4aAPfyshuLed1JjkK4JCFp368GBoknj5rUNewTun2 -1zkQF9b5Mi3k5qWkboP5rpp7DuG3PJdWypV6b/btUeqcyG1gteQwTAwebfqeM0vH -QvpuAoRMtEcSBQBl2s9zgmObXUpDlLwuIlL+to8CgYEAyJBtxx8Mo9k4jE+Q/jnu -+QFtF8R68jM9eRkeksR7+qv2yBw+KVgKKcvKE0rLErGS0LO2nJELexQ8qqcdjTrC 
-dsUvYmsybtxxnE5bD9jBlfQaqP+fp0Xd9PLeQsivRRLXqgpeFBZifqOS69XAKpTS -VHjLqPAI/hzQCUU8spJpvx0CgYAePgt2NMGgxcUi8I72CRl3IH5LJqBKMeH6Sq1j -QEQZPMZqPE0rc9yoASfdWFfyEPcvIvcUulq0JRK/s2mSJ8cEF8Vyl3OxCnm0nKuD -woczOQHFjjZ0HxsmsXuhsOHO7nU6FqUjVYSf7aIEAOYpRyDwarPIFBd+/XxROTfv -OtUA8wKBgAOiGXRxycb4rAtJBDqPAgdAAwNgvQHyVgn32ArWtgu8ermuZW5h1y45 -hULFvCbLSCpo+I7QhRhw4y2DoB1DgIw04BeFUIcE+az7HH3euAyCLQ0caaA8Xk/6 -bpPfUMe1SNi51f345QlOPvvwGllTC6DeBhZ730k7VNB32dOCV3kE ------END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/server-crt.pem b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/server-crt.pem deleted file mode 100644 index f3de3caa2..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/server-crt.pem +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDYTCCAkmgAwIBAgIBATANBgkqhkiG9w0BAQUFADBtMQswCQYDVQQGEwJVUzEO -MAwGA1UECAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEO -MAwGA1UECwwFRHVtbXkxHjAcBgNVBAMMFUR1bW15IEludGVybWVkaWF0ZSBDQTAe -Fw0xMzEwMjExOTI0MjNaFw0xODA5MjUxOTI0MjNaMGYxCzAJBgNVBAYTAlVTMQ4w -DAYDVQQIDAVEdW1teTEOMAwGA1UEBwwFRHVtbXkxDjAMBgNVBAoMBUR1bW15MQ4w -DAYDVQQLDAVEdW1teTEXMBUGA1UEAwwORHVtbXkgKHNlcnZlcikwggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0fQCRUWXt+i7JMR55Zuo6wBRxG7RnPutN -2L7J/18io52vxjm8AZDiC0JFkCHh72ZzvbgVA+e+WxAIYfioRis4JWw4jK8v5m8q -cZzS0GJNTMROPiZQi7A81tAbrV00XN7d5PsmIJ2Bf4XbJWMy31CsmoFloeRMd7bR -LxwDIb0qqRawhKsWdfZB/c9wGKmHlei50B7PXk+koKnVdsLwXxtCZDvc/3fNRHEK -lZs4m0N05G38FdrnczPm/0pie87nK9rnklL7u1sYOukOznnOtW5h7+A4M+DxzME0 -HRU6k4d+6QvukxBlsE93gHhwRsejIuDGlqD+DRxk2PdmmgsmPH59AgMBAAGjEzAR -MA8GA1UdEQQIMAaHBAoKBOQwDQYJKoZIhvcNAQEFBQADggEBAJ3bKs2b4cAJWTZj -69dMEfYZKcQIXs7euwtKlP7H8m5c+X5KmZPi1Puq4Z0gtvLu/z7J9UjZjG0CoylV -q15Zp5svryJ7XzcsZs7rwyo1JtngW1z54wr9MezqIOF2w12dTwEAINFsW7TxAsH7 -bfqkzZjuCbbsww5q4eHuZp0yaMHc3hOGaUot27OTlxlIMhv7VBBqWAj0jmvAfTKf -la0SiL/Mc8rD8D5C0SXGcCL6li/kqtinAxzhokuyyPf+hQX35kcZxEPu6WxtYVLv -hMzrokOZP2FrGbCnhaNT8gw4Aa0RXV1JgonRWYSbkeaCzvr2bJ0OuJiDdwdRKvOo -raKLlfY= ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/server-key.pem b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/server-key.pem deleted file mode 100644 index 951ad0efa..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/server-key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAtH0AkVFl7fouyTEeeWbqOsAUcRu0Zz7rTdi+yf9fIqOdr8Y5 -vAGQ4gtCRZAh4e9mc724FQPnvlsQCGH4qEYrOCVsOIyvL+ZvKnGc0tBiTUzETj4m -UIuwPNbQG61dNFze3eT7JiCdgX+F2yVjMt9QrJqBZaHkTHe20S8cAyG9KqkWsISr -FnX2Qf3PcBiph5XoudAez15PpKCp1XbC8F8bQmQ73P93zURxCpWbOJtDdORt/BXa -53Mz5v9KYnvO5yva55JS+7tbGDrpDs55zrVuYe/gODPg8czBNB0VOpOHfukL7pMQ -ZbBPd4B4cEbHoyLgxpag/g0cZNj3ZpoLJjx+fQIDAQABAoIBAG0UfxtUTn4dDdma -TgihIj6Ph8s0Kzua0yshK215YU3WBJ8O9iWh7KYwl8Ti7xdVUF3y8yYATjbFYlMu -otFQVx5/v4ANxnL0mYrVTyo5tq9xDdMbzJwxUDn0uaGAjSvwVOFWWlMYsxhoscVY -OzOrs14dosaBqTBtyZdzGULrSSBWPCBlucRcvTV/eZwgYrYJ3bG66ZTfdc930KPj -nfkWrsAWmPz8irHoWQ2OX+ZJTprVYRYIZXqpFn3zuwmhpJkZUVULMMk6LFBKDmBT -F2+b4h49P+oNJ+6CRoOERHYq2k1MmYBcu1z8lMjdfRGUDdK4vS9pcqhBXJJg1vU9 -APRtfiECgYEA6Y3LqQJLkUI0w6g/9T+XyzUoi0aUfH6PT81XnGYqJxTBHinZvgML -mF3qtZ0bHGwEoAsyhSgDkeCawE/E7Phd+B6aku2QMVm8GHygZg0Pbao4cxXv+CF3 
-i1Lo7n3zY0kTVrjsvDRsDDESmRK4Ea48fJwOfUEtfG6VDtwmZAe8chcCgYEAxdWd -sWcc45ARi2vY6yb5Ysgt/g0z26KyQydF+GMWIz1FDfUxXJ/axdCovd3VIHDvItJE -n9LjFiobkyOKX99ou1foWwsmhn11duVrF7hsVrE0nsbd4RX3sTbqXa9x3GN/ujFr -0xHUTmiXt3Qyn/076jBiLGnbtzSxJ/IZIEI9VIsCgYEAketHnTaT5BOLR9ss6ptq -yUlTJYFZcFbaTy+qV0r1dyleZuwa4L6iVfYHmKSptZ4/XYbhb5RKdq/vv8uW679Z -ZpYoWTgX6N15yYrD5D6wrwG09yJzpYGzYNbSNX93u0aC0KIFNqlCAHQAfKbXXiSQ -IgKWgudf9ehZNMmTKtgygs0CgYAoTV9Fr7Lj7QqV84+KQDNX2137PmdNHDTil1Ka -ylzNKwMxV70JmIsx91MY8uMjK76bwmg2gvi+IC/j5r6ez11/pOXx/jCH/3D5mr0Z -ZPm1I36LxgmXfCkskfpmwYIZmq9/l+fWZPByVL5roiFaFHWrPNYTJDGdff+FGr3h -o3zpBwKBgDY1sih/nY+6rwOP+DcabGK9KFFKLXsoJrXobEniLxp7oFaGN2GkmKvN -NajCs5pr3wfb4LrVrsNvERnUsUXWg6ReLqfWbT4bmjzE2iJ3IbtVQ5M4kl6YrbdZ -PMgWoLCqnoo8NoGBtmVMWhaXNJvVZPgZHk33T5F0Cg6PKNdHDchH ------END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sango.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sango.go deleted file mode 100644 index d3f755025..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sango.go +++ /dev/null @@ -1,51 +0,0 @@ -package main - -import ( - "fmt" - "os" - "time" - - MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" -) - -var f MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) { - fmt.Printf("TOPIC: %s\n", msg.Topic()) - fmt.Printf("MSG: %s\n", msg.Payload()) -} - -var onConnect MQTT.OnConnectHandler = func(client *MQTT.Client) { - fmt.Println("onConnect") - if token := client.Subscribe("shirou@github/#", 0, nil); token.Wait() && token.Error() != nil { - fmt.Println(token.Error()) - os.Exit(1) - } -} - -var subscribed = "#" - -func main() { - opts := MQTT.NewClientOptions().AddBroker("tcp://lite.mqtt.shiguredo.jp:1883") - opts.SetDefaultPublishHandler(f) - opts.SetOnConnectHandler(onConnect) - opts.SetCleanSession(true) - - opts.SetUsername("shirou@github") - opts.SetPassword("8Ub6F68kfYlr7RoV") - - c := MQTT.NewClient(opts) - if token := c.Connect(); token.Wait() && token.Error() != nil { - panic(token.Error()) - } - - qos := 0 - retain := false - payload := "sample" - topic := "shirou@github/log" - token := c.Publish(topic, byte(qos), retain, payload) - // token.Wait() - fmt.Printf("%v\n", token.Error()) - - for { - time.Sleep(1 * time.Second) - } -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sim.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sim.go deleted file mode 100644 index 0a8f4865c..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sim.go +++ /dev/null @@ -1,35 +0,0 @@ -package main - -import ( - "fmt" - "os" - "time" - - MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" -) - -var f MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) { - fmt.Printf("TOPIC: %s\n", msg.Topic()) - fmt.Printf("MSG: %s\n", msg.Payload()) -} - -var subscribed = "#" - -func main() { - opts := MQTT.NewClientOptions().AddBroker("tcp://localhost:1883") - opts.SetDefaultPublishHandler(f) - opts.SetCleanSession(true) - - c := MQTT.NewClient(opts) - if token := c.Connect(); token.Wait() && token.Error() != nil { - panic(token.Error()) - } - - if token := c.Subscribe("#", 0, nil); token.Wait() && token.Error() != nil { - fmt.Println(token.Error()) - os.Exit(1) - } - for { - time.Sleep(1 *
time.Second) - } -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sim2.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sim2.go deleted file mode 100644 index 3d9b7d262..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sim2.go +++ /dev/null @@ -1,42 +0,0 @@ -package main - -import ( - "fmt" - "os" - "time" - - MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" -) - -var f MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) { - fmt.Printf("TOPIC: %s\n", msg.Topic()) - fmt.Printf("MSG: %s\n", msg.Payload()) -} - -var onConnect MQTT.OnConnectHandler = func(client *MQTT.Client) { - fmt.Println("onConnect") - if token := client.Subscribe("#", 0, nil); token.Wait() && token.Error() != nil { - fmt.Println(token.Error()) - os.Exit(1) - } -} - -var subscribed = "#" - -func main() { - // opts := MQTT.NewClientOptions().AddBroker("tcp://localhost:1883") - opts := MQTT.NewClientOptions().AddBroker("tcp://lite.mqtt.shiguredo.jp:1883") - opts.SetDefaultPublishHandler(f) - opts.SetOnConnectHandler(onConnect) - opts.SetCleanSession(true) - - opts.SetUsername("shirou@github.com") - c := MQTT.NewClient(opts) - if token := c.Connect(); token.Wait() && token.Error() != nil { - panic(token.Error()) - } - - for { - time.Sleep(1 * time.Second) - } -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sim_pub.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sim_pub.go deleted file mode 100644 index 3f89f8af2..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sim_pub.go +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - *    Seth Hoenig - *    Allan Stockdill-Mander - *    Mike Robertson - */ - -package main - -import ( - "flag" - "fmt" - "os" - - MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" -) - -/* -Options: - [-help] Display help - [-a pub|sub] Action pub (publish) or sub (subscribe) - [-m <message>] Payload to send - [-n <number>] Number of messages to send or receive - [-q 0|1|2] Quality of Service - [-clean] CleanSession (true if -clean is present) - [-id <clientid>] ClientID - [-user <user>] User - [-password <password>] Password - [-broker <uri>] Broker URI - [-topic <topic>] Topic - [-store <path>] Store Directory - -*/ - -func main() { - topic := flag.String("topic", "", "The topic name to/from which to publish/subscribe") - broker := flag.String("broker", "tcp://iot.eclipse.org:1883", "The broker URI.
ex: tcp://10.10.1.1:1883") - password := flag.String("password", "", "The password (optional)") - user := flag.String("user", "", "The User (optional)") - id := flag.String("id", "testgoid", "The ClientID (optional)") - cleansess := flag.Bool("clean", false, "Set Clean Session (default false)") - qos := flag.Int("qos", 0, "The Quality of Service 0,1,2 (default 0)") - num := flag.Int("num", 1, "The number of messages to publish or subscribe (default 1)") - payload := flag.String("message", "", "The message text to publish (default empty)") - action := flag.String("action", "", "Action publish or subscribe (required)") - store := flag.String("store", ":memory:", "The Store Directory (default use memory store)") - flag.Parse() - - if *action != "pub" && *action != "sub" { - fmt.Println("Invalid setting for -action, must be pub or sub") - return - } - - if *topic == "" { - fmt.Println("Invalid setting for -topic, must not be empty") - return - } - - fmt.Printf("Sample Info:\n") - fmt.Printf("\taction: %s\n", *action) - fmt.Printf("\tbroker: %s\n", *broker) - fmt.Printf("\tclientid: %s\n", *id) - fmt.Printf("\tuser: %s\n", *user) - fmt.Printf("\tpassword: %s\n", *password) - fmt.Printf("\ttopic: %s\n", *topic) - fmt.Printf("\tmessage: %s\n", *payload) - fmt.Printf("\tqos: %d\n", *qos) - fmt.Printf("\tcleansess: %v\n", *cleansess) - fmt.Printf("\tnum: %d\n", *num) - fmt.Printf("\tstore: %s\n", *store) - - opts := MQTT.NewClientOptions() - opts.AddBroker(*broker) - opts.SetClientID(*id) - opts.SetUsername(*user) - opts.SetPassword(*password) - opts.SetCleanSession(*cleansess) - if *store != ":memory:" { - opts.SetStore(MQTT.NewFileStore(*store)) - } - - if *action == "pub" { - client := MQTT.NewClient(opts) - if token := client.Connect(); token.Wait() && token.Error() != nil { - panic(token.Error()) - } - fmt.Println("Sample Publisher Started") - for i := 0; i < *num; i++ { - fmt.Println("---- doing publish ----") - token := client.Publish(*topic, byte(*qos), false, *payload) - token.Wait() - } - - client.Disconnect(250) - fmt.Println("Sample Publisher Disconnected") - } else { - receiveCount := 0 - choke := make(chan [2]string) - - opts.SetDefaultPublishHandler(func(client *MQTT.Client, msg MQTT.Message) { - choke <- [2]string{msg.Topic(), string(msg.Payload())} - }) - - client := MQTT.NewClient(opts) - if token := client.Connect(); token.Wait() && token.Error() != nil { - panic(token.Error()) - } - - if token := client.Subscribe(*topic, byte(*qos), nil); token.Wait() && token.Error() != nil { - fmt.Println(token.Error()) - os.Exit(1) - } - - for receiveCount < *num { - incoming := <-choke - fmt.Printf("RECEIVED TOPIC: %s MESSAGE: %s\n", incoming[0], incoming[1]) - receiveCount++ - } - - client.Disconnect(250) - fmt.Println("Sample Subscriber Disconnected") - } -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/simple.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/simple.go deleted file mode 100644 index 0caf2d5c0..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/simple.go +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. 
This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - *    Seth Hoenig - *    Allan Stockdill-Mander - *    Mike Robertson - */ - -package main - -import ( - "fmt" - "os" - "time" - - MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" -) - -var f MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) { - fmt.Printf("TOPIC: %s\n", msg.Topic()) - fmt.Printf("MSG: %s\n", msg.Payload()) -} - -func main() { - opts := MQTT.NewClientOptions().AddBroker("tcp://iot.eclipse.org:1883").SetClientID("gotrivial") - opts.SetDefaultPublishHandler(f) - - c := MQTT.NewClient(opts) - if token := c.Connect(); token.Wait() && token.Error() != nil { - panic(token.Error()) - } - - if token := c.Subscribe("/go-mqtt/sample", 0, nil); token.Wait() && token.Error() != nil { - fmt.Println(token.Error()) - os.Exit(1) - } - - for i := 0; i < 5; i++ { - text := fmt.Sprintf("this is msg #%d!", i) - token := c.Publish("/go-mqtt/sample", 0, false, text) - token.Wait() - } - - time.Sleep(3 * time.Second) - - if token := c.Unsubscribe("/go-mqtt/sample"); token.Wait() && token.Error() != nil { - fmt.Println(token.Error()) - os.Exit(1) - } - - c.Disconnect(250) -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/ssl.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/ssl.go deleted file mode 100644 index c4efc27f2..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/ssl.go +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - *    Seth Hoenig - *    Allan Stockdill-Mander - *    Mike Robertson - */ - -/* -To run this sample, the following certificates -must be created: - - rootCA-crt.pem - root certificate authority that is used - to sign and verify the client and server - certificates. - rootCA-key.pem - keyfile for the rootCA. - - server-crt.pem - server certificate signed by the CA. - server-key.pem - keyfile for the server certificate. - - client-crt.pem - client certificate signed by the CA. - client-key.pem - keyfile for the client certificate. - - CAfile.pem - file containing concatenated CA certificates - if there is more than 1 in the chain. - (e.g. root CA -> intermediate CA -> server cert) - - Instead of creating CAfile.pem, rootCA-crt.pem can be added - to the default openssl CA certificate bundle. To find the - default CA bundle used, check: - $GO_ROOT/src/pkg/crypto/x509/root_unix.go - To use this CA bundle, just set tls.Config.RootCAs = nil. -*/ - -package main - -import "io/ioutil" -import "fmt" -import "time" -import "crypto/tls" -import "crypto/x509" -import MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" - -func NewTLSConfig() *tls.Config { - // Import trusted certificates from CAfile.pem. - // Alternatively, manually add CA certificates to - // default openssl CA bundle.
- certpool := x509.NewCertPool() - pemCerts, err := ioutil.ReadFile("samplecerts/CAfile.pem") - if err == nil { - certpool.AppendCertsFromPEM(pemCerts) - } - - // Import client certificate/key pair - cert, err := tls.LoadX509KeyPair("samplecerts/client-crt.pem", "samplecerts/client-key.pem") - if err != nil { - panic(err) - } - - // Just to print out the client certificate.. - cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0]) - if err != nil { - panic(err) - } - fmt.Println(cert.Leaf) - - // Create tls.Config with desired tls properties - return &tls.Config{ - // RootCAs = certs used to verify server cert. - RootCAs: certpool, - // ClientAuth = whether to request cert from server. - // Since the server is set up for SSL, this happens - // anyways. - ClientAuth: tls.NoClientCert, - // ClientCAs = certs used to validate client cert. - ClientCAs: nil, - // InsecureSkipVerify = skip verifying that the cert - // contents match the server hostname/IP. Set to true - // here, so the server certificate is NOT checked. - InsecureSkipVerify: true, - // Certificates = list of certs client sends to server. - Certificates: []tls.Certificate{cert}, - } -} - -var f MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) { - fmt.Printf("TOPIC: %s\n", msg.Topic()) - fmt.Printf("MSG: %s\n", msg.Payload()) -} - -func main() { - tlsconfig := NewTLSConfig() - - opts := MQTT.NewClientOptions() - opts.AddBroker("ssl://iot.eclipse.org:8883") - opts.SetClientID("ssl-sample").SetTLSConfig(tlsconfig) - opts.SetDefaultPublishHandler(f) - - // Start the connection - c := MQTT.NewClient(opts) - if token := c.Connect(); token.Wait() && token.Error() != nil { - panic(token.Error()) - } - - c.Subscribe("/go-mqtt/sample", 0, nil) - - i := 0 - for _ = range time.Tick(time.Duration(1) * time.Second) { - if i == 5 { - break - } - text := fmt.Sprintf("this is msg #%d!", i) - c.Publish("/go-mqtt/sample", 0, false, text) - i++ - } - - c.Disconnect(250) -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/stdinpub.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/stdinpub.go deleted file mode 100644 index d5604d2a3..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/stdinpub.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved.
This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package main - -import ( - "bufio" - "crypto/tls" - "flag" - "fmt" - "io" - //"log" - "os" - "strconv" - "time" -) - -import MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" - -func main() { - //MQTT.DEBUG = log.New(os.Stdout, "", 0) - //MQTT.ERROR = log.New(os.Stdout, "", 0) - stdin := bufio.NewReader(os.Stdin) - hostname, _ := os.Hostname() - - server := flag.String("server", "tcp://127.0.0.1:1883", "The full URL of the MQTT server to connect to") - topic := flag.String("topic", hostname, "Topic to publish the messages on") - qos := flag.Int("qos", 0, "The QoS to send the messages at") - retained := flag.Bool("retained", false, "Are the messages sent with the retained flag") - clientid := flag.String("clientid", hostname+strconv.Itoa(time.Now().Second()), "A clientid for the connection") - username := flag.String("username", "", "A username to authenticate to the MQTT server") - password := flag.String("password", "", "Password to match username") - flag.Parse() - - connOpts := MQTT.NewClientOptions().AddBroker(*server).SetClientID(*clientid).SetCleanSession(true) - if *username != "" { - connOpts.SetUsername(*username) - if *password != "" { - connOpts.SetPassword(*password) - } - } - tlsConfig := &tls.Config{InsecureSkipVerify: true, ClientAuth: tls.NoClientCert} - connOpts.SetTLSConfig(tlsConfig) - - client := MQTT.NewClient(connOpts) - if token := client.Connect(); token.Wait() && token.Error() != nil { - fmt.Println(token.Error()) - return - } - fmt.Printf("Connected to %s\n", *server) - - for { - message, err := stdin.ReadString('\n') - if err == io.EOF { - os.Exit(0) - } - client.Publish(*topic, byte(*qos), *retained, message) - } -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/stdoutsub.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/stdoutsub.go deleted file mode 100644 index a6b058de1..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/stdoutsub.go +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. 
This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package main - -import ( - "crypto/tls" - "flag" - "fmt" - //"log" - "os" - "os/signal" - "strconv" - "syscall" - "time" - - MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" -) - -func onMessageReceived(client *MQTT.Client, message MQTT.Message) { - fmt.Printf("Received message on topic: %s\nMessage: %s\n", message.Topic(), message.Payload()) -} - -var i int64 - -func main() { - //MQTT.DEBUG = log.New(os.Stdout, "", 0) - //MQTT.ERROR = log.New(os.Stdout, "", 0) - c := make(chan os.Signal, 1) - i = 0 - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - go func() { - <-c - fmt.Println("signal received, exiting") - os.Exit(0) - }() - - hostname, _ := os.Hostname() - - server := flag.String("server", "tcp://127.0.0.1:1883", "The full url of the MQTT server to connect to ex: tcp://127.0.0.1:1883") - topic := flag.String("topic", "#", "Topic to subscribe to") - qos := flag.Int("qos", 0, "The QoS to subscribe to messages at") - clientid := flag.String("clientid", hostname+strconv.Itoa(time.Now().Second()), "A clientid for the connection") - username := flag.String("username", "", "A username to authenticate to the MQTT server") - password := flag.String("password", "", "Password to match username") - flag.Parse() - - connOpts := &MQTT.ClientOptions{ - ClientID: *clientid, - CleanSession: true, - Username: *username, - Password: *password, - MaxReconnectInterval: 1 * time.Second, - KeepAlive: 30 * time.Second, - TLSConfig: tls.Config{InsecureSkipVerify: true, ClientAuth: tls.NoClientCert}, - } - connOpts.AddBroker(*server) - connOpts.OnConnect = func(c *MQTT.Client) { - if token := c.Subscribe(*topic, byte(*qos), onMessageReceived); token.Wait() && token.Error() != nil { - panic(token.Error()) - } - } - - client := MQTT.NewClient(connOpts) - if token := client.Connect(); token.Wait() && token.Error() != nil { - panic(token.Error()) - } else { - fmt.Printf("Connected to %s\n", *server) - } - - for { - time.Sleep(1 * time.Second) - } -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/store.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/store.go deleted file mode 100644 index 4a2ef86eb..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/store.go +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "fmt" - "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets" - "strconv" -) - -const ( - inboundPrefix = "i." - outboundPrefix = "o." -) - -// Store is an interface which can be used to provide implementations -// for message persistence. -// Because we may have to store distinct messages with the same -// message ID, we need a unique key for each message. This is -// possible by prepending "i." or "o." 
to each message id -type Store interface { - Open() - Put(string, packets.ControlPacket) - Get(string) packets.ControlPacket - All() []string - Del(string) - Close() - Reset() -} - -// A key MUST have the form "X.[messageid]" -// where X is 'i' or 'o' -func mIDFromKey(key string) uint16 { - s := key[2:] - i, err := strconv.Atoi(s) - chkerr(err) - return uint16(i) -} - -// Return a string of the form "i.[id]" -func inboundKeyFromMID(id uint16) string { - return fmt.Sprintf("%s%d", inboundPrefix, id) -} - -// Return a string of the form "o.[id]" -func outboundKeyFromMID(id uint16) string { - return fmt.Sprintf("%s%d", outboundPrefix, id) -} - -// govern which outgoing messages are persisted -func persistOutbound(s Store, m packets.ControlPacket) { - switch m.Details().Qos { - case 0: - switch m.(type) { - case *packets.PubackPacket, *packets.PubcompPacket: - // Sending puback. delete matching publish - // from ibound - s.Del(inboundKeyFromMID(m.Details().MessageID)) - } - case 1: - switch m.(type) { - case *packets.PublishPacket, *packets.PubrelPacket, *packets.SubscribePacket, *packets.UnsubscribePacket: - // Sending publish. store in obound - // until puback received - s.Put(outboundKeyFromMID(m.Details().MessageID), m) - default: - chkcond(false) - } - case 2: - switch m.(type) { - case *packets.PublishPacket: - // Sending publish. store in obound - // until pubrel received - s.Put(outboundKeyFromMID(m.Details().MessageID), m) - default: - chkcond(false) - } - } -} - -// govern which incoming messages are persisted -func persistInbound(s Store, m packets.ControlPacket) { - switch m.Details().Qos { - case 0: - switch m.(type) { - case *packets.PubackPacket, *packets.SubackPacket, *packets.UnsubackPacket, *packets.PubcompPacket: - // Received a puback. delete matching publish - // from obound - s.Del(outboundKeyFromMID(m.Details().MessageID)) - case *packets.PublishPacket, *packets.PubrecPacket, *packets.PingrespPacket, *packets.ConnackPacket: - default: - chkcond(false) - } - case 1: - switch m.(type) { - case *packets.PublishPacket, *packets.PubrelPacket: - // Received a publish. store it in ibound - // until puback sent - s.Put(inboundKeyFromMID(m.Details().MessageID), m) - default: - chkcond(false) - } - case 2: - switch m.(type) { - case *packets.PublishPacket: - // Received a publish. store it in ibound - // until pubrel received - s.Put(inboundKeyFromMID(m.Details().MessageID), m) - default: - chkcond(false) - } - } -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/token.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/token.go deleted file mode 100644 index 7644353b1..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/token.go +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright (c) 2014 IBM Corp. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Allan Stockdill-Mander - */ - -package mqtt - -import ( - "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets" - "sync" - "time" -) - -//PacketAndToken is a struct that contains both a ControlPacket and a -//Token. 
This struct is passed via channels between the client interface -//code and the underlying code responsible for sending and receiving -//MQTT messages. -type PacketAndToken struct { - p packets.ControlPacket - t Token -} - -//Token defines the interface for the tokens used to indicate when -//actions have completed. -type Token interface { - Wait() bool - WaitTimeout(time.Duration) bool - flowComplete() - Error() error -} - -type baseToken struct { - m sync.RWMutex - complete chan struct{} - ready bool - err error -} - -// Wait will wait indefinitely for the Token to complete, i.e. for the Publish -// to be sent and its receipt confirmed by the broker -func (b *baseToken) Wait() bool { - b.m.Lock() - defer b.m.Unlock() - if !b.ready { - <-b.complete - b.ready = true - } - return b.ready -} - -// WaitTimeout takes a maximum duration to wait for the flow associated with the -// Token to complete, returns true if it returned before the timeout or -// returns false if the timeout occurred. In the case of a timeout the Token -// does not have an error set in case the caller wishes to wait again -func (b *baseToken) WaitTimeout(d time.Duration) bool { - b.m.Lock() - defer b.m.Unlock() - if !b.ready { - select { - case <-b.complete: - b.ready = true - case <-time.After(d): - } - } - return b.ready -} - -func (b *baseToken) flowComplete() { - close(b.complete) -} - -func (b *baseToken) Error() error { - b.m.RLock() - defer b.m.RUnlock() - return b.err -} - -func newToken(tType byte) Token { - switch tType { - case packets.Connect: - return &ConnectToken{baseToken: baseToken{complete: make(chan struct{})}} - case packets.Subscribe: - return &SubscribeToken{baseToken: baseToken{complete: make(chan struct{})}, subResult: make(map[string]byte)} - case packets.Publish: - return &PublishToken{baseToken: baseToken{complete: make(chan struct{})}} - case packets.Unsubscribe: - return &UnsubscribeToken{baseToken: baseToken{complete: make(chan struct{})}} - case packets.Disconnect: - return &DisconnectToken{baseToken: baseToken{complete: make(chan struct{})}} - } - return nil -} - -//ConnectToken is an extension of Token containing the extra fields -//required to provide information about calls to Connect() -type ConnectToken struct { - baseToken - returnCode byte -} - -//ReturnCode returns the acknowledgement code in the connack sent -//in response to a Connect() -func (c *ConnectToken) ReturnCode() byte { - c.m.RLock() - defer c.m.RUnlock() - return c.returnCode -} - -//PublishToken is an extension of Token containing the extra fields -//required to provide information about calls to Publish() -type PublishToken struct { - baseToken - messageID uint16 -} - -//MessageID returns the MQTT message ID that was assigned to the -//Publish packet when it was sent to the broker -func (p *PublishToken) MessageID() uint16 { - return p.messageID -} - -//SubscribeToken is an extension of Token containing the extra fields -//required to provide information about calls to Subscribe() -type SubscribeToken struct { - baseToken - subs []string - subResult map[string]byte -} - -//Result returns a map of topics that were subscribed to along with -//the matching return code from the broker. This is either the Qos -//value of the subscription or an error code.
-func (s *SubscribeToken) Result() map[string]byte { - s.m.RLock() - defer s.m.RUnlock() - return s.subResult -} - -//UnsubscribeToken is an extension of Token containing the extra fields -//required to provide information about calls to Unsubscribe() -type UnsubscribeToken struct { - baseToken -} - -//DisconnectToken is an extension of Token containing the extra fields -//required to provide information about calls to Disconnect() -type DisconnectToken struct { - baseToken -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/topic.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/topic.go deleted file mode 100644 index ffe796d28..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/topic.go +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (c) 2014 IBM Corp. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "errors" - "strings" ) - -//ErrInvalidQos is the error returned when a packet is to be sent -//with an invalid Qos value -var ErrInvalidQos = errors.New("Invalid QoS") - -//ErrInvalidTopicEmptyString is the error returned when a topic string -//is passed in that is 0 length -var ErrInvalidTopicEmptyString = errors.New("Invalid Topic; empty string") - -//ErrInvalidTopicMultilevel is the error returned when a topic string -//is passed in that has the multi-level wildcard in any position but -//the last -var ErrInvalidTopicMultilevel = errors.New("Invalid Topic; multi-level wildcard must be last level") - -// Topic Names and Topic Filters -// The MQTT v3.1.1 spec clarifies a number of ambiguities with regard -// to the validity of Topic strings. -// - A Topic must be between 1 and 65535 bytes. -// - A Topic is case sensitive. -// - A Topic may contain whitespace. -// - A Topic containing a leading forward slash is different from a Topic without. -// - A Topic may be "/" (two levels, both empty string). -// - A Topic must be UTF-8 encoded. -// - A Topic may contain any number of levels. -// - A Topic may contain an empty level (two forward slashes in a row). -// - A TopicName may not contain a wildcard. -// - A TopicFilter may only have a # (multi-level) wildcard as the last level. -// - A TopicFilter may contain any number of + (single-level) wildcards. -// - A TopicFilter with a # will match the absence of a level -// Example: a subscription to "foo/#" will match messages published to "foo".
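// The sketch below is an illustrative addition (not part of the original
// file): it shows how validateTopicAndQos, defined next, enforces the rules
// above; the expected results mirror the cases in unit_topic_test.go.
func exampleTopicValidation() {
	_ = validateTopicAndQos("a/b", 1)   // nil: no wildcards, QoS 1 is valid
	_ = validateTopicAndQos("a/#", 0)   // nil: "#" is allowed as the last level
	_ = validateTopicAndQos("a/#/c", 0) // ErrInvalidTopicMultilevel: "#" must be last
	_ = validateTopicAndQos("", 0)      // ErrInvalidTopicEmptyString
	_ = validateTopicAndQos("a", 3)     // ErrInvalidQos: QoS must be 0, 1 or 2
}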
- -func validateSubscribeMap(subs map[string]byte) ([]string, []byte, error) { - var topics []string - var qoss []byte - for topic, qos := range subs { - if err := validateTopicAndQos(topic, qos); err != nil { - return nil, nil, err - } - topics = append(topics, topic) - qoss = append(qoss, qos) - } - - return topics, qoss, nil -} - -func validateTopicAndQos(topic string, qos byte) error { - if len(topic) == 0 { - return ErrInvalidTopicEmptyString - } - - levels := strings.Split(topic, "/") - for i, level := range levels { - if level == "#" && i != len(levels)-1 { - return ErrInvalidTopicMultilevel - } - } - - // qos is a byte and can never be negative, so only the upper bound needs checking. - if qos > 2 { - return ErrInvalidQos - } - return nil -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/trace.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/trace.go deleted file mode 100644 index 2f5a01466..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/trace.go +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "io/ioutil" - "log" -) - -// Internal levels of library output that are initialised to not print -// anything but can be overridden by the programmer -var ( - ERROR *log.Logger - CRITICAL *log.Logger - WARN *log.Logger - DEBUG *log.Logger -) - -func init() { - ERROR = log.New(ioutil.Discard, "", 0) - CRITICAL = log.New(ioutil.Discard, "", 0) - WARN = log.New(ioutil.Discard, "", 0) - DEBUG = log.New(ioutil.Discard, "", 0) -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_client_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_client_test.go deleted file mode 100644 index 59f7a4500..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_client_test.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved.
This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "log" - "net/http" - "os" - "testing" - - _ "net/http/pprof" -) - -func init() { - DEBUG = log.New(os.Stderr, "DEBUG ", log.Ltime) - WARN = log.New(os.Stderr, "WARNING ", log.Ltime) - CRITICAL = log.New(os.Stderr, "CRITICAL ", log.Ltime) - ERROR = log.New(os.Stderr, "ERROR ", log.Ltime) - - go func() { - log.Println(http.ListenAndServe("localhost:6060", nil)) - }() -} - -func Test_NewClient_simple(t *testing.T) { - ops := NewClientOptions().SetClientID("foo").AddBroker("tcp://10.10.0.1:1883") - c := NewClient(ops) - - if c == nil { - t.Fatalf("client is nil") - } - - if c.options.ClientID != "foo" { - t.Fatalf("bad client id") - } - - if c.options.Servers[0].Scheme != "tcp" { - t.Fatalf("bad server scheme") - } - - if c.options.Servers[0].Host != "10.10.0.1:1883" { - t.Fatalf("bad server host") - } -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_messageids_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_messageids_test.go deleted file mode 100644 index 9d941f697..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_messageids_test.go +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "fmt" - "testing" - "time" -) - -type DummyToken struct{} - -func (d *DummyToken) Wait() bool { - return true -} - -func (d *DummyToken) WaitTimeout(t time.Duration) bool { - return true -} - -func (d *DummyToken) flowComplete() {} - -func (d *DummyToken) Error() error { - return nil -} - -func Test_getID(t *testing.T) { - mids := &messageIds{index: make(map[uint16]Token)} - - i1 := mids.getID(&DummyToken{}) - - if i1 != 1 { - t.Fatalf("i1 was wrong: %v", i1) - } - - i2 := mids.getID(&DummyToken{}) - - if i2 != 2 { - t.Fatalf("i2 was wrong: %v", i2) - } - - for i := uint16(3); i < 100; i++ { - id := mids.getID(&DummyToken{}) - if id != i { - t.Fatalf("id was wrong expected %v got %v", i, id) - } - } -} - -func Test_freeID(t *testing.T) { - mids := &messageIds{index: make(map[uint16]Token)} - - i1 := mids.getID(&DummyToken{}) - mids.freeID(i1) - - if i1 != 1 { - t.Fatalf("i1 was wrong: %v", i1) - } - - i2 := mids.getID(&DummyToken{}) - fmt.Printf("i2: %v\n", i2) -} - -func Test_messageids_mix(t *testing.T) { - mids := &messageIds{index: make(map[uint16]Token)} - - done := make(chan bool) - a := make(chan uint16, 3) - b := make(chan uint16, 20) - c := make(chan uint16, 100) - - go func() { - for i := 0; i < 10000; i++ { - a <- mids.getID(&DummyToken{}) - mids.freeID(<-b) - } - done <- true - }() - - go func() { - for i := 0; i < 10000; i++ { - b <- mids.getID(&DummyToken{}) - mids.freeID(<-c) - } - done <- true - }() - - go func() { - for i := 0; i < 10000; i++ { - c <- mids.getID(&DummyToken{}) - mids.freeID(<-a) - } - done <- true -
}() - - <-done - <-done - <-done -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_options_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_options_test.go deleted file mode 100644 index d4181442e..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_options_test.go +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "crypto/tls" - "crypto/x509" - "testing" - "time" -) - -func Test_NewClientOptions_default(t *testing.T) { - o := NewClientOptions() - - if o.ClientID != "" { - t.Fatalf("bad default client id") - } - - if o.Username != "" { - t.Fatalf("bad default username") - } - - if o.Password != "" { - t.Fatalf("bad default password") - } - - if o.KeepAlive != 30*time.Second { - t.Fatalf("bad default timeout") - } -} - -func Test_NewClientOptions_mix(t *testing.T) { - o := NewClientOptions() - o.AddBroker("tcp://192.168.1.2:9999") - o.SetClientID("myclientid") - o.SetUsername("myuser") - o.SetPassword("mypassword") - o.SetKeepAlive(88) - - if o.Servers[0].Scheme != "tcp" { - t.Fatalf("bad scheme") - } - - if o.Servers[0].Host != "192.168.1.2:9999" { - t.Fatalf("bad host") - } - - if o.ClientID != "myclientid" { - t.Fatalf("bad set clientid") - } - - if o.Username != "myuser" { - t.Fatalf("bad set username") - } - - if o.Password != "mypassword" { - t.Fatalf("bad set password") - } - - if o.KeepAlive != 88 { - t.Fatalf("bad set timeout") - } -} - -func Test_ModifyOptions(t *testing.T) { - o := NewClientOptions() - o.AddBroker("tcp://3.3.3.3:12345") - c := NewClient(o) - o.AddBroker("ws://2.2.2.2:9999") - o.SetOrderMatters(false) - - if c.options.Servers[0].Scheme != "tcp" { - t.Fatalf("client options.server.Scheme was modified") - } - - // if c.options.server.Host != "2.2.2.2:9999" { - // t.Fatalf("client options.server.Host was modified") - // } - - if o.Order != false { - t.Fatalf("options.order was not modified") - } -} - -func Test_TLSConfig(t *testing.T) { - o := NewClientOptions().SetTLSConfig(&tls.Config{ - RootCAs: x509.NewCertPool(), - ClientAuth: tls.NoClientCert, - ClientCAs: x509.NewCertPool(), - InsecureSkipVerify: true}) - - c := NewClient(o) - - if c.options.TLSConfig.ClientAuth != tls.NoClientCert { - t.Fatalf("client options.tlsConfig ClientAuth incorrect") - } - - if c.options.TLSConfig.InsecureSkipVerify != true { - t.Fatalf("client options.tlsConfig InsecureSkipVerify incorrect") - } -} - -func Test_OnConnectionLost(t *testing.T) { - onconnlost := func(client *Client, err error) { - panic(err) - } - o := NewClientOptions().SetConnectionLostHandler(onconnlost) - - c := NewClient(o) - - if c.options.OnConnectionLost == nil { - t.Fatalf("client options.onconnlost was nil") - } -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_ping_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_ping_test.go deleted file mode 100644 index 2ac8831da..000000000 --- 
a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_ping_test.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "bytes" - "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets" - "testing" -) - -func Test_NewPingReqMessage(t *testing.T) { - pr := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket) - if pr.MessageType != packets.Pingreq { - t.Errorf("NewPingReqMessage bad msg type: %v", pr.MessageType) - } - if pr.RemainingLength != 0 { - t.Errorf("NewPingReqMessage bad remlen, expected 0, got %d", pr.RemainingLength) - } - - exp := []byte{ - 0xC0, - 0x00, - } - - var buf bytes.Buffer - pr.Write(&buf) - bs := buf.Bytes() - - if len(bs) != 2 { - t.Errorf("NewPingReqMessage.Bytes() wrong length: %d", len(bs)) - } - - if exp[0] != bs[0] || exp[1] != bs[1] { - t.Errorf("NewPingMessage.Bytes() wrong") - } -} - -func Test_DecodeMessage_pingresp(t *testing.T) { - bs := bytes.NewBuffer([]byte{ - 0xD0, - 0x00, - }) - presp, _ := packets.ReadPacket(bs) - if presp.(*packets.PingrespPacket).MessageType != packets.Pingresp { - t.Errorf("DecodeMessage ping response wrong msg type: %v", presp.(*packets.PingrespPacket).MessageType) - } - if presp.(*packets.PingrespPacket).RemainingLength != 0 { - t.Errorf("DecodeMessage ping response wrong rem len: %d", presp.(*packets.PingrespPacket).RemainingLength) - } -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_router_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_router_test.go deleted file mode 100644 index 48e6e392b..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_router_test.go +++ /dev/null @@ -1,287 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. 
This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets" - "testing" -) - -func Test_newRouter(t *testing.T) { - router, stop := newRouter() - if router == nil { - t.Fatalf("router is nil") - } - if stop == nil { - t.Fatalf("stop is nil") - } - if router.routes.Len() != 0 { - t.Fatalf("router.routes was not empty") - } -} - -func Test_AddRoute(t *testing.T) { - router, _ := newRouter() - calledback := false - cb := func(client *Client, msg Message) { - calledback = true - } - router.addRoute("/alpha", cb) - - if router.routes.Len() != 1 { - t.Fatalf("router.routes was wrong") - } -} - -func Test_Match(t *testing.T) { - router, _ := newRouter() - router.addRoute("/alpha", nil) - - if !router.routes.Front().Value.(*route).match("/alpha") { - t.Fatalf("match function is bad") - } - - if router.routes.Front().Value.(*route).match("alpha") { - t.Fatalf("match function is bad") - } -} - -func Test_match(t *testing.T) { - - check := func(route, topic string, exp bool) { - result := routeIncludesTopic(route, topic) - if exp != result { - t.Errorf("match was bad R: %v, T: %v, EXP: %v", route, topic, exp) - } - } - - // ** Basic ** - R := "" - T := "" - check(R, T, true) - - R = "x" - T = "" - check(R, T, false) - - R = "" - T = "x" - check(R, T, false) - - R = "x" - T = "x" - check(R, T, true) - - R = "x" - T = "X" - check(R, T, false) - - R = "alpha" - T = "alpha" - check(R, T, true) - - R = "alpha" - T = "beta" - check(R, T, false) - - // ** / ** - R = "/" - T = "/" - check(R, T, true) - - R = "/one" - T = "/one" - check(R, T, true) - - R = "/" - T = "/two" - check(R, T, false) - - R = "/two" - T = "/" - check(R, T, false) - - R = "/two" - T = "two" - check(R, T, false) // a leading "/" creates a different topic - - R = "/a/" - T = "/a" - check(R, T, false) - - R = "/a/" - T = "/a/b" - check(R, T, false) - - R = "/a/b" - T = "/a/b" - check(R, T, true) - - R = "/a/b/" - T = "/a/b" - check(R, T, false) - - R = "/a/b" - T = "/R/b" - check(R, T, false) - - // ** + ** - R = "/a/+/c" - T = "/a/b/c" - check(R, T, true) - - R = "/+/b/c" - T = "/a/b/c" - check(R, T, true) - - R = "/a/b/+" - T = "/a/b/c" - check(R, T, true) - - R = "/a/+/+" - T = "/a/b/c" - check(R, T, true) - - R = "/+/+/+" - T = "/a/b/c" - check(R, T, true) - - R = "/+/+/c" - T = "/a/b/c" - check(R, T, true) - - R = "/a/b/c/+" // different number of levels - T = "/a/b/c" - check(R, T, false) - - R = "+" - T = "a" - check(R, T, true) - - R = "/+" - T = "a" - check(R, T, false) - - R = "+/+" - T = "/a" - check(R, T, true) - - R = "+/+" - T = "a" - check(R, T, false) - - // ** # ** - R = "#" - T = "/a/b/c" - check(R, T, true) - - R = "/#" - T = "/a/b/c" - check(R, T, true) - - // R = "/#/" // not valid - // T = "/a/b/c" - // check(R, T, true) - - R = "/#" - T = "/a/b/c" - check(R, T, true) - - R = "/a/#" - T = "/a/b/c" - check(R, T, true) - - R = "/a/#" - T = "/a/b/c" - check(R, T, true) - - R = "/a/b/#" - T = "/a/b/c" - check(R, T, true) - - // ** unicode ** - R = "☃" - T = "☃" - check(R, T, true) - - R = "✈" - T = "☃" - check(R, T, false) - - R = "/☃/✈" - T = "/☃/ッ" - check(R, T, false) - - R = "#" - T = "/☃/ッ" - check(R, T, true) - - R = "/☃/+" - T = "/☃/ッ/♫/ø/☹☹☹" - check(R, T, false) 
- - R = "/☃/#" - T = "/☃/ッ/♫/ø/☹☹☹" - check(R, T, true) - - R = "/☃/ッ/♫/ø/+" - T = "/☃/ッ/♫/ø/☹☹☹" - check(R, T, true) - - R = "/☃/ッ/+/ø/☹☹☹" - T = "/☃/ッ/♫/ø/☹☹☹" - check(R, T, true) - - R = "/+/a/ッ/+/ø/☹☹☹" - T = "/b/♫/ッ/♫/ø/☹☹☹" - check(R, T, false) - - R = "/+/♫/ッ/+/ø/☹☹☹" - T = "/b/♫/ッ/♫/ø/☹☹☹" - check(R, T, true) -} - -func Test_MatchAndDispatch(t *testing.T) { - calledback := make(chan bool) - - cb := func(c *Client, m Message) { - calledback <- true - } - - pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) - pub.Qos = 2 - pub.TopicName = "a" - pub.Payload = []byte("foo") - - msgs := make(chan *packets.PublishPacket) - - router, stopper := newRouter() - router.addRoute("a", cb) - - router.matchAndDispatch(msgs, true, nil) - - msgs <- pub - - <-calledback - - stopper <- true - - select { - case msgs <- pub: - t.Errorf("msgs should not have a listener") - default: - } - -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_store_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_store_test.go deleted file mode 100644 index 42e7d7ceb..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_store_test.go +++ /dev/null @@ -1,668 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "bufio" - "fmt" - "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets" - "io/ioutil" - "os" - "testing" -) - -func Test_fullpath(t *testing.T) { - p := fullpath("/tmp/store", "o.44324") - e := "/tmp/store/o.44324.msg" - if p != e { - t.Fatalf("full path expected %s, got %s", e, p) - } -} - -func Test_exists(t *testing.T) { - b := exists("/") - if !b { - t.Errorf("/proc/cpuinfo was not found") - } -} - -func Test_exists_no(t *testing.T) { - b := exists("/this/path/is/not/real/i/hope") - if b { - t.Errorf("you have some strange files") - } -} - -func isemptydir(dir string) bool { - chkcond(exists(dir)) - files, err := ioutil.ReadDir(dir) - chkerr(err) - return len(files) == 0 -} - -func Test_mIDFromKey(t *testing.T) { - key := "i.123" - exp := uint16(123) - res := mIDFromKey(key) - if exp != res { - t.Fatalf("mIDFromKey failed") - } -} - -func Test_inboundKeyFromMID(t *testing.T) { - id := uint16(9876) - exp := "i.9876" - res := inboundKeyFromMID(id) - if exp != res { - t.Fatalf("inboundKeyFromMID failed") - } -} - -func Test_outboundKeyFromMID(t *testing.T) { - id := uint16(7654) - exp := "o.7654" - res := outboundKeyFromMID(id) - if exp != res { - t.Fatalf("outboundKeyFromMID failed") - } -} - -/************************ - **** persistOutbound **** - ************************/ - -func Test_persistOutbound_connect(t *testing.T) { - ts := &TestStore{} - m := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket) - m.Qos = 0 - m.Username = "user" - m.Password = []byte("pass") - m.ClientIdentifier = "cid" - //m := newConnectMsg(false, false, QOS_ZERO, false, "", nil, "cid", "user", "pass", 10) - persistOutbound(ts, m) - - if len(ts.mput) != 0 { - t.Fatalf("persistOutbound put message it should not have") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistOutbound 
get message it should not have") - } - - if len(ts.mdel) != 0 { - t.Fatalf("persistOutbound del message it should not have") - } -} - -func Test_persistOutbound_publish_0(t *testing.T) { - ts := &TestStore{} - m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) - m.Qos = 0 - m.TopicName = "/popub0" - m.Payload = []byte{0xBB, 0x00} - m.MessageID = 40 - persistOutbound(ts, m) - - if len(ts.mput) != 0 { - t.Fatalf("persistOutbound put message it should not have") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistOutbound get message it should not have") - } - - if len(ts.mdel) != 0 { - t.Fatalf("persistOutbound del message it should not have") - } -} - -func Test_persistOutbound_publish_1(t *testing.T) { - ts := &TestStore{} - m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) - m.Qos = 1 - m.TopicName = "/popub1" - m.Payload = []byte{0xBB, 0x00} - m.MessageID = 41 - persistOutbound(ts, m) - - if len(ts.mput) != 1 || ts.mput[0] != 41 { - t.Fatalf("persistOutbound put message it should not have") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistOutbound get message it should not have") - } - - if len(ts.mdel) != 0 { - t.Fatalf("persistOutbound del message it should not have") - } -} - -func Test_persistOutbound_publish_2(t *testing.T) { - ts := &TestStore{} - m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) - m.Qos = 2 - m.TopicName = "/popub2" - m.Payload = []byte{0xBB, 0x00} - m.MessageID = 42 - persistOutbound(ts, m) - - if len(ts.mput) != 1 || ts.mput[0] != 42 { - t.Fatalf("persistOutbound put message it should not have") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistOutbound get message it should not have") - } - - if len(ts.mdel) != 0 { - t.Fatalf("persistOutbound del message it should not have") - } -} - -func Test_persistOutbound_puback(t *testing.T) { - ts := &TestStore{} - m := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket) - persistOutbound(ts, m) - - if len(ts.mput) != 0 { - t.Fatalf("persistOutbound put message it should not have") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistOutbound get message it should not have") - } - - if len(ts.mdel) != 1 { - t.Fatalf("persistOutbound del message it should not have") - } -} - -func Test_persistOutbound_pubrec(t *testing.T) { - ts := &TestStore{} - m := packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket) - persistOutbound(ts, m) - - if len(ts.mput) != 0 { - t.Fatalf("persistOutbound put message it should not have") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistOutbound get message it should not have") - } - - if len(ts.mdel) != 0 { - t.Fatalf("persistOutbound del message it should not have") - } -} - -func Test_persistOutbound_pubrel(t *testing.T) { - ts := &TestStore{} - m := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket) - m.MessageID = 43 - - persistOutbound(ts, m) - - if len(ts.mput) != 1 || ts.mput[0] != 43 { - t.Fatalf("persistOutbound put message it should not have") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistOutbound get message it should not have") - } - - if len(ts.mdel) != 0 { - t.Fatalf("persistOutbound del message it should not have") - } -} - -func Test_persistOutbound_pubcomp(t *testing.T) { - ts := &TestStore{} - m := packets.NewControlPacket(packets.Pubcomp).(*packets.PubcompPacket) - persistOutbound(ts, m) - - if len(ts.mput) != 0 { - t.Fatalf("persistOutbound put message it should not have") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistOutbound get message it should not have") - } - - 
if len(ts.mdel) != 1 { - t.Fatalf("persistOutbound del message it should not have") - } -} - -func Test_persistOutbound_subscribe(t *testing.T) { - ts := &TestStore{} - m := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket) - m.Topics = []string{"/posub"} - m.Qoss = []byte{1} - m.MessageID = 44 - persistOutbound(ts, m) - - if len(ts.mput) != 1 || ts.mput[0] != 44 { - t.Fatalf("persistOutbound put message it should not have") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistOutbound get message it should not have") - } - - if len(ts.mdel) != 0 { - t.Fatalf("persistOutbound del message it should not have") - } -} - -func Test_persistOutbound_unsubscribe(t *testing.T) { - ts := &TestStore{} - m := packets.NewControlPacket(packets.Unsubscribe).(*packets.UnsubscribePacket) - m.Topics = []string{"/posub"} - m.MessageID = 45 - persistOutbound(ts, m) - - if len(ts.mput) != 1 || ts.mput[0] != 45 { - t.Fatalf("persistOutbound put message it should not have") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistOutbound get message it should not have") - } - - if len(ts.mdel) != 0 { - t.Fatalf("persistOutbound del message it should not have") - } -} - -func Test_persistOutbound_pingreq(t *testing.T) { - ts := &TestStore{} - m := packets.NewControlPacket(packets.Pingreq) - persistOutbound(ts, m) - - if len(ts.mput) != 0 { - t.Fatalf("persistOutbound put message it should not have") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistOutbound get message it should not have") - } - - if len(ts.mdel) != 0 { - t.Fatalf("persistOutbound del message it should not have") - } -} - -func Test_persistOutbound_disconnect(t *testing.T) { - ts := &TestStore{} - m := packets.NewControlPacket(packets.Disconnect) - persistOutbound(ts, m) - - if len(ts.mput) != 0 { - t.Fatalf("persistOutbound put message it should not have") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistOutbound get message it should not have") - } - - if len(ts.mdel) != 0 { - t.Fatalf("persistOutbound del message it should not have") - } -} - -/************************ - **** persistInbound **** - ************************/ - -func Test_persistInbound_connack(t *testing.T) { - ts := &TestStore{} - m := packets.NewControlPacket(packets.Connack) - persistInbound(ts, m) - - if len(ts.mput) != 0 { - t.Fatalf("persistInbound in bad state") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistInbound in bad state") - } - - if len(ts.mdel) != 0 { - t.Fatalf("persistInbound in bad state") - } -} - -func Test_persistInbound_publish_0(t *testing.T) { - ts := &TestStore{} - m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) - m.Qos = 0 - m.TopicName = "/pipub0" - m.Payload = []byte{0xCC, 0x01} - m.MessageID = 50 - persistInbound(ts, m) - - if len(ts.mput) != 0 { - t.Fatalf("persistInbound in bad state") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistInbound in bad state") - } - - if len(ts.mdel) != 0 { - t.Fatalf("persistInbound in bad state") - } -} - -func Test_persistInbound_publish_1(t *testing.T) { - ts := &TestStore{} - m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) - m.Qos = 1 - m.TopicName = "/pipub1" - m.Payload = []byte{0xCC, 0x02} - m.MessageID = 51 - persistInbound(ts, m) - - if len(ts.mput) != 1 || ts.mput[0] != 51 { - t.Fatalf("persistInbound in bad state") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistInbound in bad state") - } - - if len(ts.mdel) != 0 { - t.Fatalf("persistInbound in bad state") - } -} - -func Test_persistInbound_publish_2(t *testing.T) { - ts := &TestStore{} - 
m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) - m.Qos = 2 - m.TopicName = "/pipub2" - m.Payload = []byte{0xCC, 0x03} - m.MessageID = 52 - persistInbound(ts, m) - - if len(ts.mput) != 1 || ts.mput[0] != 52 { - t.Fatalf("persistInbound in bad state") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistInbound in bad state") - } - - if len(ts.mdel) != 0 { - t.Fatalf("persistInbound in bad state") - } -} - -func Test_persistInbound_puback(t *testing.T) { - ts := &TestStore{} - pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) - pub.Qos = 1 - pub.TopicName = "/pub1" - pub.Payload = []byte{0xCC, 0x04} - pub.MessageID = 53 - publishKey := inboundKeyFromMID(pub.MessageID) - ts.Put(publishKey, pub) - - m := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket) - m.MessageID = 53 - - persistInbound(ts, m) // "deletes" packets.Publish from store - - if len(ts.mput) != 1 { // not actually deleted in TestStore - t.Fatalf("persistInbound in bad state") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistInbound in bad state") - } - - if len(ts.mdel) != 1 || ts.mdel[0] != 53 { - t.Fatalf("persistInbound in bad state") - } -} - -func Test_persistInbound_pubrec(t *testing.T) { - ts := &TestStore{} - pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) - pub.Qos = 2 - pub.TopicName = "/pub2" - pub.Payload = []byte{0xCC, 0x05} - pub.MessageID = 54 - publishKey := inboundKeyFromMID(pub.MessageID) - ts.Put(publishKey, pub) - - m := packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket) - m.MessageID = 54 - - persistInbound(ts, m) - - if len(ts.mput) != 1 || ts.mput[0] != 54 { - t.Fatalf("persistInbound in bad state") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistInbound in bad state") - } - - if len(ts.mdel) != 0 { - t.Fatalf("persistInbound in bad state") - } -} - -func Test_persistInbound_pubrel(t *testing.T) { - ts := &TestStore{} - pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) - pub.Qos = 2 - pub.TopicName = "/pub2" - pub.Payload = []byte{0xCC, 0x06} - pub.MessageID = 55 - publishKey := inboundKeyFromMID(pub.MessageID) - ts.Put(publishKey, pub) - - m := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket) - m.MessageID = 55 - - persistInbound(ts, m) // will overwrite publish - - if len(ts.mput) != 2 { - t.Fatalf("persistInbound in bad state") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistInbound in bad state") - } - - if len(ts.mdel) != 0 { - t.Fatalf("persistInbound in bad state") - } -} - -func Test_persistInbound_pubcomp(t *testing.T) { - ts := &TestStore{} - - m := packets.NewControlPacket(packets.Pubcomp).(*packets.PubcompPacket) - m.MessageID = 56 - - persistInbound(ts, m) - - if len(ts.mput) != 0 { - t.Fatalf("persistInbound in bad state") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistInbound in bad state") - } - - if len(ts.mdel) != 1 || ts.mdel[0] != 56 { - t.Fatalf("persistInbound in bad state") - } -} - -func Test_persistInbound_suback(t *testing.T) { - ts := &TestStore{} - - m := packets.NewControlPacket(packets.Suback).(*packets.SubackPacket) - m.MessageID = 57 - - persistInbound(ts, m) - - if len(ts.mput) != 0 { - t.Fatalf("persistInbound in bad state") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistInbound in bad state") - } - - if len(ts.mdel) != 1 || ts.mdel[0] != 57 { - t.Fatalf("persistInbound in bad state") - } -} - -func Test_persistInbound_unsuback(t *testing.T) { - ts := &TestStore{} - - m := 
packets.NewControlPacket(packets.Unsuback).(*packets.UnsubackPacket) - m.MessageID = 58 - - persistInbound(ts, m) - - if len(ts.mput) != 0 { - t.Fatalf("persistInbound in bad state") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistInbound in bad state") - } - - if len(ts.mdel) != 1 || ts.mdel[0] != 58 { - t.Fatalf("persistInbound in bad state") - } -} - -func Test_persistInbound_pingresp(t *testing.T) { - ts := &TestStore{} - m := packets.NewControlPacket(packets.Pingresp) - - persistInbound(ts, m) - - if len(ts.mput) != 0 { - t.Fatalf("persistInbound in bad state") - } - - if len(ts.mget) != 0 { - t.Fatalf("persistInbound in bad state") - } - - if len(ts.mdel) != 0 { - t.Fatalf("persistInbound in bad state") - } -} - -/*********** - * restore * - ***********/ - -func ensureRestoreDir() { - if exists("/tmp/restore") { - rerr := os.RemoveAll("/tmp/restore") - chkerr(rerr) - } - os.Mkdir("/tmp/restore", 0766) -} - -func writeToRestore(fname, content string) { - f, cerr := os.Create("/tmp/restore/" + fname) - chkerr(cerr) - chkcond(f != nil) - w := bufio.NewWriter(f) - w.Write([]byte(content)) - w.Flush() - f.Close() -} - -func verifyFromRestore(fname, content string, t *testing.T) { - msg, oerr := os.Open("/tmp/restore/" + fname) - chkerr(oerr) - all, rerr := ioutil.ReadAll(msg) - chkerr(rerr) - msg.Close() - s := string(all) - if s != content { - t.Fatalf("verify content expected `%s` but got `%s`", content, s) - } -} - -func Test_restore_1(t *testing.T) { - ensureRestoreDir() - - writeToRestore("i.1.bkp", "this is critical 1") - - restore("/tmp/restore") - - chkcond(!exists("/tmp/restore/i.1.bkp")) - chkcond(exists("/tmp/restore/i.1.msg")) - - verifyFromRestore("i.1.msg", "this is critical 1", t) -} - -func Test_restore_2(t *testing.T) { - ensureRestoreDir() - - writeToRestore("o.2.msg", "this is critical 2") - - restore("/tmp/restore") - - chkcond(!exists("/tmp/restore/o.2.bkp")) - chkcond(exists("/tmp/restore/o.2.msg")) - - verifyFromRestore("o.2.msg", "this is critical 2", t) -} - -func Test_restore_3(t *testing.T) { - ensureRestoreDir() - - N := 20 - // evens are .msg - // odds are .bkp - for i := 0; i < N; i++ { - content := fmt.Sprintf("foo %d bar", i) - if i%2 == 0 { - mname := fmt.Sprintf("i.%d.msg", i) - writeToRestore(mname, content) - } else { - mname := fmt.Sprintf("i.%d.bkp", i) - writeToRestore(mname, content) - } - } - - restore("/tmp/restore") - - for i := 0; i < N; i++ { - mname := fmt.Sprintf("i.%d.msg", i) - bname := fmt.Sprintf("i.%d.bkp", i) - content := fmt.Sprintf("foo %d bar", i) - chkcond(!exists("/tmp/restore/" + bname)) - chkcond(exists("/tmp/restore/" + mname)) - - verifyFromRestore(mname, content, t) - } -} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_topic_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_topic_test.go deleted file mode 100644 index da2b240e8..000000000 --- a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_topic_test.go +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) 2013 IBM Corp. - * - * All rights reserved. 
This program and the accompanying materials - * are made available under the terms of the Eclipse Public License v1.0 - * which accompanies this distribution, and is available at - * http://www.eclipse.org/legal/epl-v10.html - * - * Contributors: - * Seth Hoenig - * Allan Stockdill-Mander - * Mike Robertson - */ - -package mqtt - -import ( - "testing" -) - -func Test_ValidateTopicAndQos_qos3(t *testing.T) { - e := validateTopicAndQos("a", 3) - if e != ErrInvalidQos { - t.Fatalf("invalid error for invalid qos") - } -} - -func Test_ValidateTopicAndQos_ES(t *testing.T) { - e := validateTopicAndQos("", 0) - if e != ErrInvalidTopicEmptyString { - t.Fatalf("invalid error for empty topic name") - } -} - -func Test_ValidateTopicAndQos_a_0(t *testing.T) { - e := validateTopicAndQos("a", 0) - if e != nil { - t.Fatalf("error from valid NewTopicFilter") - } -} - -func Test_ValidateTopicAndQos_H(t *testing.T) { - e := validateTopicAndQos("a/#/c", 0) - if e != ErrInvalidTopicMultilevel { - t.Fatalf("invalid error for bad multilevel topic filter") - } -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/.gitignore b/Godeps/_workspace/src/github.com/Shopify/sarama/.gitignore deleted file mode 100644 index 3591f9ff3..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so -*.test - -# Folders -_obj -_test -.vagrant - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/.travis.yml b/Godeps/_workspace/src/github.com/Shopify/sarama/.travis.yml deleted file mode 100644 index a9e5cc3cb..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/.travis.yml +++ /dev/null @@ -1,41 +0,0 @@ -language: go -go: -- 1.3.3 -- 1.4.2 - -env: - global: - - KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095 - - TOXIPROXY_ADDR=http://localhost:8474 - - KAFKA_INSTALL_ROOT=/home/travis/kafka - - KAFKA_HOSTNAME=localhost - - DEBUG=true - matrix: - - KAFKA_VERSION=0.8.1.1 - - KAFKA_VERSION=0.8.2.1 - -before_install: -- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR} -- vagrant/install_cluster.sh -- vagrant/boot_cluster.sh -- vagrant/create_topics.sh - - -install: -- make install_dependencies - -script: -- make test -- make vet -- make errcheck -- make fmt - -matrix: - include: - - go: tip - env: KAFKA_VERSION=0.8.2.1 - allow_failures: - - go: tip - fast_finish: true - -sudo: false diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/CHANGELOG.md b/Godeps/_workspace/src/github.com/Shopify/sarama/CHANGELOG.md deleted file mode 100644 index 5bea6bc3b..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/CHANGELOG.md +++ /dev/null @@ -1,157 +0,0 @@ -# Changelog - -#### Version 1.5.0 (unreleased) - -New Features: - - TLS-encrypted network connections are now supported. This feature is subject - to change when Kafka releases built-in TLS support, but for now this is - enough to work with TLS-terminating proxies - ([#154](https://github.com/Shopify/sarama/pull/154)). - -Improvements: - - The consumer will not block if a single partition is not drained by the user; - all other partitions will continue to consume normally - ([#485](https://github.com/Shopify/sarama/pull/485)). 
- - Formatting of error strings has been much improved - ([#495](https://github.com/Shopify/sarama/pull/495)). - - Internal refactoring of the producer for code cleanliness and to enable - future work ([#300](https://github.com/Shopify/sarama/pull/300)). - -Bug Fixes: - - Fix a potential deadlock in the consumer on shutdown - ([#475](https://github.com/Shopify/sarama/pull/475)). - -#### Version 1.4.3 (2015-07-21) - -Bug Fixes: - - Don't include the partitioner in the producer's "fetch partitions" - circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)). - - Don't retry messages until the broker is closed when abandoning a broker in - the producer ([#468](https://github.com/Shopify/sarama/pull/468)). - - Update the import path for snappy-go, it has moved again and the API has - changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)). - -#### Version 1.4.2 (2015-05-27) - -Bug Fixes: - - Update the import path for snappy-go, it has moved from google code to github - ([#456](https://github.com/Shopify/sarama/pull/456)). - -#### Version 1.4.1 (2015-05-25) - -Improvements: - - Optimizations when decoding snappy messages, thanks to John Potocny - ([#446](https://github.com/Shopify/sarama/pull/446)). - -Bug Fixes: - - Fix hypothetical race conditions on producer shutdown - ([#450](https://github.com/Shopify/sarama/pull/450), - [#451](https://github.com/Shopify/sarama/pull/451)). - -#### Version 1.4.0 (2015-05-01) - -New Features: - - The consumer now implements `Topics()` and `Partitions()` methods to enable - users to dynamically choose what topics/partitions to consume without - instantiating a full client - ([#431](https://github.com/Shopify/sarama/pull/431)). - - The partition-consumer now exposes the high water mark offset value returned - by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)). - - Added a `kafka-console-consumer` tool capable of handling multiple - partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer` - ([#439](https://github.com/Shopify/sarama/pull/439), - [#442](https://github.com/Shopify/sarama/pull/442)). - -Improvements: - - The producer's logging during retry scenarios is more consistent, more - useful, and slightly less verbose - ([#429](https://github.com/Shopify/sarama/pull/429)). - - The client now shuffles its initial list of seed brokers in order to prevent - thundering herd on the first broker in the list - ([#441](https://github.com/Shopify/sarama/pull/441)). - -Bug Fixes: - - The producer now correctly manages its state if retries occur when it is - shutting down, fixing several instances of confusing behaviour and at least - one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)). - - The consumer now handles messages for different partitions asynchronously, - making it much more resilient to specific user code ordering - ([#325](https://github.com/Shopify/sarama/pull/325)). - -#### Version 1.3.0 (2015-04-16) - -New Features: - - The client now tracks consumer group coordinators using - ConsumerMetadataRequests similar to how it tracks partition leadership using - regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)). 
This adds two methods to the client API: - - `Coordinator(consumerGroup string) (*Broker, error)` - - `RefreshCoordinator(consumerGroup string) error` - -Improvements: - - ConsumerMetadataResponses now automatically create a Broker object out of the - ID/address/port combination for the Coordinator; accessing the fields - individually has been deprecated - ([#413](https://github.com/Shopify/sarama/pull/413)). - - Much improved handling of `OffsetOutOfRange` errors in the consumer. - Consumers will fail to start if the provided offset is out of range - ([#418](https://github.com/Shopify/sarama/pull/418)) - and they will automatically shut down if the offset falls out of range - ([#424](https://github.com/Shopify/sarama/pull/424)). - - Small performance improvement in encoding and decoding protocol messages - ([#427](https://github.com/Shopify/sarama/pull/427)). - -Bug Fixes: - - Fix a rare race condition in the client's background metadata refresher if - it happens to be activated while the client is being closed - ([#422](https://github.com/Shopify/sarama/pull/422)). - -#### Version 1.2.0 (2015-04-07) - -Improvements: - - The producer's behaviour when `Flush.Frequency` is set is now more intuitive - ([#389](https://github.com/Shopify/sarama/pull/389)). - - The producer is now somewhat more memory-efficient during and after retrying - messages due to an improved queue implementation - ([#396](https://github.com/Shopify/sarama/pull/396)). - - The consumer produces much more useful logging output when leadership - changes ([#385](https://github.com/Shopify/sarama/pull/385)). - - The client's `GetOffset` method will now automatically refresh metadata and - retry once in the event of stale information or similar - ([#394](https://github.com/Shopify/sarama/pull/394)). - - Broker connections now have support for using TCP keepalives - ([#407](https://github.com/Shopify/sarama/issues/407)). - -Bug Fixes: - - The OffsetCommitRequest message now correctly implements all three possible - API versions ([#390](https://github.com/Shopify/sarama/pull/390), - [#400](https://github.com/Shopify/sarama/pull/400)). - -#### Version 1.1.0 (2015-03-20) - -Improvements: - - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly - broken topics don't choke throughput - ([#373](https://github.com/Shopify/sarama/pull/373)). - -Bug Fixes: - - Fix the producer's internal reference counting in certain unusual scenarios - ([#367](https://github.com/Shopify/sarama/pull/367)). - - Fix the consumer's internal reference counting in certain unusual scenarios - ([#369](https://github.com/Shopify/sarama/pull/369)). - - Fix a condition where the producer's internal control messages could have - gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)). - - Fix an issue where invalid partition lists would be cached when asking for - metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)). - - -#### Version 1.0.0 (2015-03-17) - -Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are: - -- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking. -- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/Shopify/sarama/CONTRIBUTING.md
deleted file mode 100644
index b0f107cbc..000000000
--- a/Godeps/_workspace/src/github.com/Shopify/sarama/CONTRIBUTING.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Contributing
-
-Contributions are always welcome, both reporting issues and submitting pull requests!
-
-### Reporting issues
-
-Please make sure to include any potentially useful information in the issue, so we can pinpoint the issue faster without going back and forth.
-
-- What SHA of Sarama are you running? If this is not the latest SHA on the master branch, please check whether the problem persists with the latest version.
-- You can set `sarama.Logger` to a [log.Logger](http://golang.org/pkg/log/#Logger) instance to capture debug output. Please include it in your issue description.
-- Also look at the logs of the Kafka broker you are connected to. If you see anything out of the ordinary, please include it.
-
-Also, please include the following information about your environment, so we can help you faster:
-
-- What version of Kafka are you using?
-- What version of Go are you using?
-- What are the values of your Producer/Consumer/Client configuration?
-
-### Submitting pull requests
-
-We will gladly accept bug fixes, or additions to this library. Please fork this library, commit & push your changes, and open a pull request. Because this library is in production use by many people and applications, we code review all additions. To make the review process go as smoothly as possible, please consider the following.
-
-- If you plan to work on something major, please open an issue to discuss the design first.
-- Don't break backwards compatibility. If you really have to, open an issue to discuss this first.
-- Make sure to use the `go fmt` command to format your code according to the standards. Even better, set up your editor to do this for you when saving.
-- Run [go vet](https://godoc.org/golang.org/x/tools/cmd/vet) to detect any suspicious constructs in your code that could be bugs.
-- Explicitly handle all error return values. If you really want to ignore an error value, you can assign it to `_`. You can use [errcheck](https://github.com/kisielk/errcheck) to verify whether you have handled all errors.
-- You may also want to run [golint](https://github.com/golang/lint) to detect style problems.
-- Add tests that cover the changes you made. Make sure to run `go test` with the `-race` argument to test for race conditions.
-- Make sure your code is supported by all the Go versions we support. You can rely on [Travis CI](https://travis-ci.org/Shopify/sarama) for testing older Go versions.
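The error-handling guideline above is the one errcheck enforces mechanically; a small illustration of the intended pattern (the file name is arbitrary):

```go
package main

import (
	"log"
	"os"
)

func main() {
	f, err := os.Create("out.txt")
	if err != nil {
		log.Fatal(err)
	}

	// Handle the error rather than silently dropping the return value.
	if _, err := f.Write([]byte("data\n")); err != nil {
		log.Println("write failed:", err)
	}

	// If you truly mean to ignore an error, make that explicit with `_`
	// so errcheck (and reviewers) can see it was deliberate.
	_ = f.Close()
}
```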
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/MIT-LICENSE b/Godeps/_workspace/src/github.com/Shopify/sarama/MIT-LICENSE
deleted file mode 100644
index 8121b63b1..000000000
--- a/Godeps/_workspace/src/github.com/Shopify/sarama/MIT-LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (c) 2013 Evan Huus
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/Makefile b/Godeps/_workspace/src/github.com/Shopify/sarama/Makefile
deleted file mode 100644
index b76e97a97..000000000
--- a/Godeps/_workspace/src/github.com/Shopify/sarama/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-default: fmt vet errcheck test
-
-test:
-	go test -v -timeout 60s -race ./...
-
-vet:
-	go vet ./...
-
-errcheck:
-	errcheck github.com/Shopify/sarama/...
-
-fmt:
-	@if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi
-
-install_dependencies: install_errcheck install_go_vet get
-
-install_errcheck:
-	go get github.com/kisielk/errcheck
-
-install_go_vet:
-	go get golang.org/x/tools/cmd/vet
-
-get:
-	go get -t
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/README.md
deleted file mode 100644
index 486372730..000000000
--- a/Godeps/_workspace/src/github.com/Shopify/sarama/README.md
+++ /dev/null
@@ -1,31 +0,0 @@
-sarama
-======
-
-[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama)
-[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)
-
-Sarama is an MIT-licensed Go client library for Apache Kafka 0.8 (and later).
-
-### Getting started
-
-- API documentation and examples are available via godoc at https://godoc.org/github.com/Shopify/sarama.
-- Mocks for testing are available in the [mocks](./mocks) subpackage.
-- The [examples](./examples) directory contains more elaborate example applications.
-- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
-- There is a Google group for Kafka client users and authors at https://groups.google.com/forum/#!forum/kafka-clients
-
-### Compatibility and API stability
-
-Sarama provides a "2 releases + 2 months" compatibility guarantee: we support the two latest releases of Kafka
-and Go, and we provide a two-month grace period for older releases. This means we currently officially
-support Go 1.3 and 1.4, and Kafka 0.8.1 and 0.8.2.
-
-Sarama follows semantic versioning and provides API stability via the gopkg.in service.
-You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
-A changelog is available [here](CHANGELOG.md).
-
-### Other
-
-* [Sarama wiki](https://github.com/Shopify/sarama/wiki) to get started hacking on sarama itself.
-* [Kafka Project Home](https://kafka.apache.org/)
-* [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol)
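As a concrete illustration of the gopkg.in note in the README above, a minimal program pinning the v1 API (a sketch; the alias keeps the package name stable regardless of the import path):

```go
package main

import (
	"log"

	sarama "gopkg.in/Shopify/sarama.v1"
)

func main() {
	// Any exported identifier works; NewConfig is the usual entry point.
	config := sarama.NewConfig()
	config.ClientID = "my-app" // ClientID is part of the stable v1 config surface
	log.Printf("using client id %q", config.ClientID)
}
```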
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/Vagrantfile b/Godeps/_workspace/src/github.com/Shopify/sarama/Vagrantfile
deleted file mode 100644
index 4862dd936..000000000
--- a/Godeps/_workspace/src/github.com/Shopify/sarama/Vagrantfile
+++ /dev/null
@@ -1,22 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
-MEMORY = 3072
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
-  config.vm.box = "hashicorp/precise64"
-
-  config.vm.provision :shell, path: "vagrant/provision.sh"
-
-  config.vm.network "private_network", ip: "192.168.100.67"
-
-  config.vm.provider "vmware_fusion" do |v|
-    v.vmx["memsize"] = MEMORY.to_s
-  end
-  config.vm.provider "virtualbox" do |v|
-    v.memory = MEMORY
-  end
-end
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/async_producer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/async_producer.go
deleted file mode 100644
index 8e229490f..000000000
--- a/Godeps/_workspace/src/github.com/Shopify/sarama/async_producer.go
+++ /dev/null
@@ -1,924 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"sync"
-	"time"
-
-	"github.com/eapache/go-resiliency/breaker"
-	"github.com/eapache/queue"
-)
-
-// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
-// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
-// and parses responses for errors. You must read from the Errors() channel or the
-// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
-// leaks: it will not be garbage-collected automatically when it passes out of
-// scope.
-type AsyncProducer interface {
-
-	// AsyncClose triggers a shutdown of the producer, flushing any messages it may have
-	// buffered. The shutdown has completed when both the Errors and Successes channels
-	// have been closed. When calling AsyncClose, you *must* continue to read from those
-	// channels in order to drain the results of any messages in flight.
-	AsyncClose()
-
-	// Close shuts down the producer and flushes any messages it may have buffered.
-	// You must call this function before a producer object passes out of scope, as
-	// it may otherwise leak memory. You must call this before calling Close on the
-	// underlying client.
-	Close() error
-
-	// Input is the input channel for the user to write messages to that they wish to send.
-	Input() chan<- *ProducerMessage
-
-	// Successes is the success output channel back to the user when AckSuccesses is enabled.
-	// If Return.Successes is true, you MUST read from this channel or the Producer will deadlock.
-	// It is suggested that you send and read messages together in a single select statement.
-	Successes() <-chan *ProducerMessage
-
-	// Errors is the error output channel back to the user. You MUST read from this channel
-	// or the Producer will deadlock when the channel is full. Alternatively, you can set
-	// Producer.Return.Errors in your config to false, which prevents errors from being returned.
-	Errors() <-chan *ProducerError
-}
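Because failing to drain Errors() is the easiest way to hit the deadlock the comments above warn about, here is a minimal sketch of that contract (placeholders as before; the Example functions later in this file show fuller select-based patterns):

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// A nil config means sarama.NewConfig() defaults: Return.Errors is true,
	// Return.Successes is false.
	producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, nil)
	if err != nil {
		log.Fatal(err)
	}

	producer.Input() <- &sarama.ProducerMessage{
		Topic: "my_topic",
		Value: sarama.StringEncoder("testing 123"),
	}

	// AsyncClose flushes buffered messages and then closes the Errors and
	// Successes channels; keep draining Errors until that happens, as the
	// interface contract requires.
	producer.AsyncClose()
	for perr := range producer.Errors() {
		log.Println("produce failed:", perr)
	}
}
```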
-
-type asyncProducer struct {
-	client    Client
-	conf      *Config
-	ownClient bool
-
-	errors                    chan *ProducerError
-	input, successes, retries chan *ProducerMessage
-	inFlight                  sync.WaitGroup
-
-	brokers    map[*Broker]chan<- *ProducerMessage
-	brokerRefs map[chan<- *ProducerMessage]int
-	brokerLock sync.Mutex
-}
-
-// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
-func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
-	client, err := NewClient(addrs, conf)
-	if err != nil {
-		return nil, err
-	}
-
-	p, err := NewAsyncProducerFromClient(client)
-	if err != nil {
-		return nil, err
-	}
-	p.(*asyncProducer).ownClient = true
-	return p, nil
-}
-
-// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
-// necessary to call Close() on the underlying client when shutting down this producer.
-func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
-	// Check that we are not dealing with a closed Client before processing any other arguments
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	p := &asyncProducer{
-		client:     client,
-		conf:       client.Config(),
-		errors:     make(chan *ProducerError),
-		input:      make(chan *ProducerMessage),
-		successes:  make(chan *ProducerMessage),
-		retries:    make(chan *ProducerMessage),
-		brokers:    make(map[*Broker]chan<- *ProducerMessage),
-		brokerRefs: make(map[chan<- *ProducerMessage]int),
-	}
-
-	// launch our singleton dispatchers
-	go withRecover(p.dispatcher)
-	go withRecover(p.retryHandler)
-
-	return p, nil
-}
-
-type flagSet int8
-
-const (
-	chaser   flagSet = 1 << iota // message is last in a group that failed
-	shutdown                     // start the shutdown process
-)
-
-// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
-type ProducerMessage struct {
-	Topic string  // The Kafka topic for this message.
-	Key   Encoder // The partitioning key for this message. It must implement the Encoder interface. Pre-existing Encoders include StringEncoder and ByteEncoder.
-	Value Encoder // The actual message to store in Kafka. It must implement the Encoder interface. Pre-existing Encoders include StringEncoder and ByteEncoder.
-
-	// These are filled in by the producer as the message is processed
-	Offset    int64 // Offset is the offset of the message stored on the broker. This is only guaranteed to be defined if the message was successfully delivered and RequiredAcks is not NoResponse.
-	Partition int32 // Partition is the partition that the message was sent to. This is only guaranteed to be defined if the message was successfully delivered.
-
-	Metadata interface{} // Metadata holds arbitrary data you wish to include so it will be available when receiving on the Successes and Errors channels. Sarama completely ignores this field; it is intended only for pass-through data.
-
-	retries int
-	flags   flagSet
-}
-
-func (m *ProducerMessage) byteSize() int {
-	size := 26 // the metadata overhead of CRC, flags, etc.
- if m.Key != nil { - size += m.Key.Length() - } - if m.Value != nil { - size += m.Value.Length() - } - return size -} - -func (m *ProducerMessage) clear() { - m.flags = 0 - m.retries = 0 -} - -// ProducerError is the type of error generated when the producer fails to deliver a message. -// It contains the original ProducerMessage as well as the actual error value. -type ProducerError struct { - Msg *ProducerMessage - Err error -} - -func (pe ProducerError) Error() string { - return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err) -} - -// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface. -// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel -// when closing a producer. -type ProducerErrors []*ProducerError - -func (pe ProducerErrors) Error() string { - return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe)) -} - -func (p *asyncProducer) Errors() <-chan *ProducerError { - return p.errors -} - -func (p *asyncProducer) Successes() <-chan *ProducerMessage { - return p.successes -} - -func (p *asyncProducer) Input() chan<- *ProducerMessage { - return p.input -} - -func (p *asyncProducer) Close() error { - p.AsyncClose() - - if p.conf.Producer.Return.Successes { - go withRecover(func() { - for _ = range p.successes { - } - }) - } - - var errors ProducerErrors - if p.conf.Producer.Return.Errors { - for event := range p.errors { - errors = append(errors, event) - } - } - - if len(errors) > 0 { - return errors - } - return nil -} - -func (p *asyncProducer) AsyncClose() { - go withRecover(p.shutdown) -} - -// singleton -// dispatches messages by topic -func (p *asyncProducer) dispatcher() { - handlers := make(map[string]chan<- *ProducerMessage) - shuttingDown := false - - for msg := range p.input { - if msg == nil { - Logger.Println("Something tried to send a nil message, it was ignored.") - continue - } - - if msg.flags&shutdown != 0 { - shuttingDown = true - p.inFlight.Done() - continue - } else if msg.retries == 0 { - if shuttingDown { - // we can't just call returnError here because that decrements the wait group, - // which hasn't been incremented yet for this message, and shouldn't be - pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown} - if p.conf.Producer.Return.Errors { - p.errors <- pErr - } else { - Logger.Println(pErr) - } - continue - } - p.inFlight.Add(1) - } - - if (p.conf.Producer.Compression == CompressionNone && msg.Value != nil && msg.Value.Length() > p.conf.Producer.MaxMessageBytes) || - (msg.byteSize() > p.conf.Producer.MaxMessageBytes) { - - p.returnError(msg, ErrMessageSizeTooLarge) - continue - } - - handler := handlers[msg.Topic] - if handler == nil { - handler = p.newTopicProducer(msg.Topic) - handlers[msg.Topic] = handler - } - - handler <- msg - } - - for _, handler := range handlers { - close(handler) - } -} - -// one per topic -// partitions messages, then dispatches them by partition -type topicProducer struct { - parent *asyncProducer - topic string - input <-chan *ProducerMessage - - breaker *breaker.Breaker - handlers map[int32]chan<- *ProducerMessage - partitioner Partitioner -} - -func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage { - input := make(chan *ProducerMessage, p.conf.ChannelBufferSize) - tp := &topicProducer{ - parent: p, - topic: topic, - input: input, - breaker: breaker.New(3, 1, 10*time.Second), - handlers: make(map[int32]chan<- *ProducerMessage), - partitioner: 
p.conf.Producer.Partitioner(topic), - } - go withRecover(tp.dispatch) - return input -} - -func (tp *topicProducer) dispatch() { - for msg := range tp.input { - if msg.retries == 0 { - if err := tp.partitionMessage(msg); err != nil { - tp.parent.returnError(msg, err) - continue - } - } - - handler := tp.handlers[msg.Partition] - if handler == nil { - handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition) - tp.handlers[msg.Partition] = handler - } - - handler <- msg - } - - for _, handler := range tp.handlers { - close(handler) - } -} - -func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error { - var partitions []int32 - - err := tp.breaker.Run(func() (err error) { - if tp.partitioner.RequiresConsistency() { - partitions, err = tp.parent.client.Partitions(msg.Topic) - } else { - partitions, err = tp.parent.client.WritablePartitions(msg.Topic) - } - return - }) - - if err != nil { - return err - } - - numPartitions := int32(len(partitions)) - - if numPartitions == 0 { - return ErrLeaderNotAvailable - } - - choice, err := tp.partitioner.Partition(msg, numPartitions) - - if err != nil { - return err - } else if choice < 0 || choice >= numPartitions { - return ErrInvalidPartition - } - - msg.Partition = partitions[choice] - - return nil -} - -// one per partition per topic -// dispatches messages to the appropriate broker -// also responsible for maintaining message order during retries -type partitionProducer struct { - parent *asyncProducer - topic string - partition int32 - input <-chan *ProducerMessage - - leader *Broker - breaker *breaker.Breaker - output chan<- *ProducerMessage - - // highWatermark tracks the "current" retry level, which is the only one where we actually let messages through, - // all other messages get buffered in retryState[msg.retries].buf to preserve ordering - // retryState[msg.retries].expectChaser simply tracks whether we've seen a chaser message for a given level (and - // therefore whether our buffer is complete and safe to flush) - highWatermark int - retryState []partitionRetryState -} - -type partitionRetryState struct { - buf []*ProducerMessage - expectChaser bool -} - -func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage { - input := make(chan *ProducerMessage, p.conf.ChannelBufferSize) - pp := &partitionProducer{ - parent: p, - topic: topic, - partition: partition, - input: input, - - breaker: breaker.New(3, 1, 10*time.Second), - retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1), - } - go withRecover(pp.dispatch) - return input -} - -func (pp *partitionProducer) dispatch() { - // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader` - // on the first message - pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition) - if pp.leader != nil { - pp.output = pp.parent.getBrokerProducer(pp.leader) - } - - for msg := range pp.input { - if msg.retries > pp.highWatermark { - // a new, higher, retry level; handle it and then back off - pp.newHighWatermark(msg.retries) - time.Sleep(pp.parent.conf.Producer.Retry.Backoff) - } else if pp.highWatermark > 0 { - // we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level - if msg.retries < pp.highWatermark { - // in fact this message is not even the current retry level, so buffer it for now (unless it's a just a chaser) - if msg.flags&chaser == chaser { - pp.retryState[msg.retries].expectChaser = false - pp.parent.inFlight.Done() // this chaser is 
now handled and will be garbage collected - } else { - pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg) - } - continue - } else if msg.flags&chaser == chaser { - // this message is of the current retry level (msg.retries == highWatermark) and the chaser flag is set, - // meaning this retry level is done and we can go down (at least) one level and flush that - pp.retryState[pp.highWatermark].expectChaser = false - pp.flushRetryBuffers() - pp.parent.inFlight.Done() // this chaser is now handled and will be garbage collected - continue - } - } - - // if we made it this far then the current msg contains real data, and can be sent to the next goroutine - // without breaking any of our ordering guarantees - - if pp.output == nil { - if err := pp.updateLeader(); err != nil { - pp.parent.returnError(msg, err) - time.Sleep(pp.parent.conf.Producer.Retry.Backoff) - continue - } - Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) - } - - pp.output <- msg - } - - if pp.output != nil { - pp.parent.unrefBrokerProducer(pp.leader, pp.output) - } -} - -func (pp *partitionProducer) newHighWatermark(hwm int) { - Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm) - pp.highWatermark = hwm - - // send off a chaser so that we know when everything "in between" has made it - // back to us and we can safely flush the backlog (otherwise we risk re-ordering messages) - pp.retryState[pp.highWatermark].expectChaser = true - pp.parent.inFlight.Add(1) // we're generating a chaser message; track it so we don't shut down while it's still inflight - pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: chaser, retries: pp.highWatermark - 1} - - // a new HWM means that our current broker selection is out of date - Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID()) - pp.parent.unrefBrokerProducer(pp.leader, pp.output) - pp.output = nil -} - -func (pp *partitionProducer) flushRetryBuffers() { - Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark) - for { - pp.highWatermark-- - - if pp.output == nil { - if err := pp.updateLeader(); err != nil { - pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err) - goto flushDone - } - Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) - } - - for _, msg := range pp.retryState[pp.highWatermark].buf { - pp.output <- msg - } - - flushDone: - pp.retryState[pp.highWatermark].buf = nil - if pp.retryState[pp.highWatermark].expectChaser { - Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark) - break - } else if pp.highWatermark == 0 { - Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition) - break - } - } -} - -func (pp *partitionProducer) updateLeader() error { - return pp.breaker.Run(func() (err error) { - if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil { - return err - } - - if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil { - return err - } - - pp.output = pp.parent.getBrokerProducer(pp.leader) - return nil - }) -} - -// one per broker, constructs both an aggregator and a flusher -func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage { - input := make(chan *ProducerMessage) - bridge := make(chan 
[]*ProducerMessage) - - a := &aggregator{ - parent: p, - broker: broker, - input: input, - output: bridge, - } - go withRecover(a.run) - - f := &flusher{ - parent: p, - broker: broker, - input: bridge, - currentRetries: make(map[string]map[int32]error), - } - go withRecover(f.run) - - return input -} - -// groups messages together into appropriately-sized batches for sending to the broker -// based on https://godoc.org/github.com/eapache/channels#BatchingChannel -type aggregator struct { - parent *asyncProducer - broker *Broker - input <-chan *ProducerMessage - output chan<- []*ProducerMessage - - buffer []*ProducerMessage - bufferBytes int - timer <-chan time.Time -} - -func (a *aggregator) run() { - var output chan<- []*ProducerMessage - - for { - select { - case msg := <-a.input: - if msg == nil { - goto shutdown - } - - if a.wouldOverflow(msg) { - Logger.Printf("producer/aggregator/%d maximum request accumulated, forcing blocking flush\n", a.broker.ID()) - a.output <- a.buffer - a.reset() - output = nil - } - - a.buffer = append(a.buffer, msg) - a.bufferBytes += msg.byteSize() - - if a.readyToFlush(msg) { - output = a.output - } else if a.parent.conf.Producer.Flush.Frequency > 0 && a.timer == nil { - a.timer = time.After(a.parent.conf.Producer.Flush.Frequency) - } - case <-a.timer: - output = a.output - case output <- a.buffer: - a.reset() - output = nil - } - } - -shutdown: - if len(a.buffer) > 0 { - a.output <- a.buffer - } - close(a.output) -} - -func (a *aggregator) wouldOverflow(msg *ProducerMessage) bool { - switch { - // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety. - case a.bufferBytes+msg.byteSize() >= int(MaxRequestSize-(10*1024)): - return true - // Would we overflow the size-limit of a compressed message-batch? - case a.parent.conf.Producer.Compression != CompressionNone && a.bufferBytes+msg.byteSize() >= a.parent.conf.Producer.MaxMessageBytes: - return true - // Would we overflow simply in number of messages? 
- case a.parent.conf.Producer.Flush.MaxMessages > 0 && len(a.buffer) >= a.parent.conf.Producer.Flush.MaxMessages: - return true - default: - return false - } -} - -func (a *aggregator) readyToFlush(msg *ProducerMessage) bool { - switch { - // If all three config values are 0, we always flush as-fast-as-possible - case a.parent.conf.Producer.Flush.Frequency == 0 && a.parent.conf.Producer.Flush.Bytes == 0 && a.parent.conf.Producer.Flush.Messages == 0: - return true - // If the messages is a chaser we must flush to maintain the state-machine - case msg.flags&chaser == chaser: - return true - // If we've passed the message trigger-point - case a.parent.conf.Producer.Flush.Messages > 0 && len(a.buffer) >= a.parent.conf.Producer.Flush.Messages: - return true - // If we've passed the byte trigger-point - case a.parent.conf.Producer.Flush.Bytes > 0 && a.bufferBytes >= a.parent.conf.Producer.Flush.Bytes: - return true - default: - return false - } -} - -func (a *aggregator) reset() { - a.timer = nil - a.buffer = nil - a.bufferBytes = 0 -} - -// takes a batch at a time from the aggregator and sends to the broker -type flusher struct { - parent *asyncProducer - broker *Broker - input <-chan []*ProducerMessage - - currentRetries map[string]map[int32]error -} - -func (f *flusher) run() { - var closing error - - Logger.Printf("producer/flusher/%d starting up\n", f.broker.ID()) - - for batch := range f.input { - if closing != nil { - f.parent.retryMessages(batch, closing) - continue - } - - msgSets := f.groupAndFilter(batch) - request := f.parent.buildRequest(msgSets) - if request == nil { - continue - } - - response, err := f.broker.Produce(request) - - switch err.(type) { - case nil: - break - case PacketEncodingError: - f.parent.returnErrors(batch, err) - continue - default: - Logger.Printf("producer/flusher/%d state change to [closing] because %s\n", f.broker.ID(), err) - f.parent.abandonBrokerConnection(f.broker) - _ = f.broker.Close() - closing = err - f.parent.retryMessages(batch, err) - continue - } - - if response == nil { - // this only happens when RequiredAcks is NoResponse, so we have to assume success - f.parent.returnSuccesses(batch) - continue - } - - f.parseResponse(msgSets, response) - } - Logger.Printf("producer/flusher/%d shut down\n", f.broker.ID()) -} - -func (f *flusher) groupAndFilter(batch []*ProducerMessage) map[string]map[int32][]*ProducerMessage { - msgSets := make(map[string]map[int32][]*ProducerMessage) - - for i, msg := range batch { - - if f.currentRetries[msg.Topic] != nil && f.currentRetries[msg.Topic][msg.Partition] != nil { - // we're currently retrying this partition so we need to filter out this message - f.parent.retryMessages([]*ProducerMessage{msg}, f.currentRetries[msg.Topic][msg.Partition]) - batch[i] = nil - - if msg.flags&chaser == chaser { - // ...but now we can start processing future messages again - Logger.Printf("producer/flusher/%d state change to [normal] on %s/%d\n", - f.broker.ID(), msg.Topic, msg.Partition) - delete(f.currentRetries[msg.Topic], msg.Partition) - } - - continue - } - - partitionSet := msgSets[msg.Topic] - if partitionSet == nil { - partitionSet = make(map[int32][]*ProducerMessage) - msgSets[msg.Topic] = partitionSet - } - - partitionSet[msg.Partition] = append(partitionSet[msg.Partition], msg) - } - - return msgSets -} - -func (f *flusher) parseResponse(msgSets map[string]map[int32][]*ProducerMessage, response *ProduceResponse) { - // we iterate through the blocks in the request set, not the response, so that we notice - // if the 
response is missing a block completely - for topic, partitionSet := range msgSets { - for partition, msgs := range partitionSet { - block := response.GetBlock(topic, partition) - if block == nil { - f.parent.returnErrors(msgs, ErrIncompleteResponse) - continue - } - - switch block.Err { - // Success - case ErrNoError: - for i := range msgs { - msgs[i].Offset = block.Offset + int64(i) - } - f.parent.returnSuccesses(msgs) - // Retriable errors - case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, - ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: - Logger.Printf("producer/flusher/%d state change to [retrying] on %s/%d because %v\n", - f.broker.ID(), topic, partition, block.Err) - if f.currentRetries[topic] == nil { - f.currentRetries[topic] = make(map[int32]error) - } - f.currentRetries[topic][partition] = block.Err - f.parent.retryMessages(msgs, block.Err) - // Other non-retriable errors - default: - f.parent.returnErrors(msgs, block.Err) - } - } - } -} - -// singleton -// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock -// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel -func (p *asyncProducer) retryHandler() { - var msg *ProducerMessage - buf := queue.New() - - for { - if buf.Length() == 0 { - msg = <-p.retries - } else { - select { - case msg = <-p.retries: - case p.input <- buf.Peek().(*ProducerMessage): - buf.Remove() - continue - } - } - - if msg == nil { - return - } - - buf.Add(msg) - } -} - -// utility functions - -func (p *asyncProducer) shutdown() { - Logger.Println("Producer shutting down.") - p.inFlight.Add(1) - p.input <- &ProducerMessage{flags: shutdown} - - p.inFlight.Wait() - - if p.ownClient { - err := p.client.Close() - if err != nil { - Logger.Println("producer/shutdown failed to close the embedded client:", err) - } - } - - close(p.input) - close(p.retries) - close(p.errors) - close(p.successes) -} - -func (p *asyncProducer) buildRequest(batch map[string]map[int32][]*ProducerMessage) *ProduceRequest { - - req := &ProduceRequest{RequiredAcks: p.conf.Producer.RequiredAcks, Timeout: int32(p.conf.Producer.Timeout / time.Millisecond)} - empty := true - - for topic, partitionSet := range batch { - for partition, msgSet := range partitionSet { - setToSend := new(MessageSet) - setSize := 0 - for _, msg := range msgSet { - var keyBytes, valBytes []byte - var err error - if msg.Key != nil { - if keyBytes, err = msg.Key.Encode(); err != nil { - p.returnError(msg, err) - continue - } - } - if msg.Value != nil { - if valBytes, err = msg.Value.Encode(); err != nil { - p.returnError(msg, err) - continue - } - } - - if p.conf.Producer.Compression != CompressionNone && setSize+msg.byteSize() > p.conf.Producer.MaxMessageBytes { - // compression causes message-sets to be wrapped as single messages, which have tighter - // size requirements, so we have to respect those limits - valBytes, err := encode(setToSend) - if err != nil { - Logger.Println(err) // if this happens, it's basically our fault. 
- panic(err) - } - req.AddMessage(topic, partition, &Message{Codec: p.conf.Producer.Compression, Key: nil, Value: valBytes}) - setToSend = new(MessageSet) - setSize = 0 - } - setSize += msg.byteSize() - - setToSend.addMessage(&Message{Codec: CompressionNone, Key: keyBytes, Value: valBytes}) - empty = false - } - - if p.conf.Producer.Compression == CompressionNone { - req.AddSet(topic, partition, setToSend) - } else { - valBytes, err := encode(setToSend) - if err != nil { - Logger.Println(err) // if this happens, it's basically our fault. - panic(err) - } - req.AddMessage(topic, partition, &Message{Codec: p.conf.Producer.Compression, Key: nil, Value: valBytes}) - } - } - } - - if empty { - return nil - } - return req -} - -func (p *asyncProducer) returnError(msg *ProducerMessage, err error) { - msg.clear() - pErr := &ProducerError{Msg: msg, Err: err} - if p.conf.Producer.Return.Errors { - p.errors <- pErr - } else { - Logger.Println(pErr) - } - p.inFlight.Done() -} - -func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) { - for _, msg := range batch { - if msg != nil { - p.returnError(msg, err) - } - } -} - -func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) { - for _, msg := range batch { - if msg == nil { - continue - } - if p.conf.Producer.Return.Successes { - msg.clear() - p.successes <- msg - } - p.inFlight.Done() - } -} - -func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) { - for _, msg := range batch { - if msg == nil { - continue - } - if msg.retries >= p.conf.Producer.Retry.Max { - p.returnError(msg, err) - } else { - msg.retries++ - p.retries <- msg - } - } -} - -func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage { - p.brokerLock.Lock() - defer p.brokerLock.Unlock() - - bp := p.brokers[broker] - - if bp == nil { - bp = p.newBrokerProducer(broker) - p.brokers[broker] = bp - p.brokerRefs[bp] = 0 - } - - p.brokerRefs[bp]++ - - return bp -} - -func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) { - p.brokerLock.Lock() - defer p.brokerLock.Unlock() - - p.brokerRefs[bp]-- - if p.brokerRefs[bp] == 0 { - close(bp) - delete(p.brokerRefs, bp) - - if p.brokers[broker] == bp { - delete(p.brokers, broker) - } - } -} - -func (p *asyncProducer) abandonBrokerConnection(broker *Broker) { - p.brokerLock.Lock() - defer p.brokerLock.Unlock() - - delete(p.brokers, broker) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/async_producer_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/async_producer_test.go deleted file mode 100644 index 403456839..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/async_producer_test.go +++ /dev/null @@ -1,743 +0,0 @@ -package sarama - -import ( - "errors" - "log" - "os" - "os/signal" - "sync" - "testing" - "time" -) - -const TestMessage = "ABC THE MESSAGE" - -func closeProducer(t *testing.T, p AsyncProducer) { - var wg sync.WaitGroup - p.AsyncClose() - - wg.Add(2) - go func() { - for _ = range p.Successes() { - t.Error("Unexpected message on Successes()") - } - wg.Done() - }() - go func() { - for msg := range p.Errors() { - t.Error(msg.Err) - } - wg.Done() - }() - wg.Wait() -} - -func expectResults(t *testing.T, p AsyncProducer, successes, errors int) { - for successes > 0 || errors > 0 { - select { - case msg := <-p.Errors(): - if msg.Msg.flags != 0 { - t.Error("Message had flags set") - } - errors-- - if errors < 0 { - t.Error(msg.Err) - } - case msg := <-p.Successes(): - if msg.flags != 0 { - 
t.Error("Message had flags set") - } - successes-- - if successes < 0 { - t.Error("Too many successes") - } - } - } -} - -type testPartitioner chan *int32 - -func (p testPartitioner) Partition(msg *ProducerMessage, numPartitions int32) (int32, error) { - part := <-p - if part == nil { - return 0, errors.New("BOOM") - } - - return *part, nil -} - -func (p testPartitioner) RequiresConsistency() bool { - return true -} - -func (p testPartitioner) feed(partition int32) { - p <- &partition -} - -func TestAsyncProducer(t *testing.T) { - seedBroker := newMockBroker(t, 1) - leader := newMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - - config := NewConfig() - config.Producer.Flush.Messages = 10 - config.Producer.Return.Successes = true - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Metadata: i} - } - for i := 0; i < 10; i++ { - select { - case msg := <-producer.Errors(): - t.Error(msg.Err) - if msg.Msg.flags != 0 { - t.Error("Message had flags set") - } - case msg := <-producer.Successes(): - if msg.flags != 0 { - t.Error("Message had flags set") - } - if msg.Metadata.(int) != i { - t.Error("Message metadata did not match") - } - } - } - - closeProducer(t, producer) - leader.Close() - seedBroker.Close() -} - -func TestAsyncProducerMultipleFlushes(t *testing.T) { - seedBroker := newMockBroker(t, 1) - leader := newMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - leader.Returns(prodSuccess) - leader.Returns(prodSuccess) - - config := NewConfig() - config.Producer.Flush.Messages = 5 - config.Producer.Return.Successes = true - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for flush := 0; flush < 3; flush++ { - for i := 0; i < 5; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - expectResults(t, producer, 5, 0) - } - - closeProducer(t, producer) - leader.Close() - seedBroker.Close() -} - -func TestAsyncProducerMultipleBrokers(t *testing.T) { - seedBroker := newMockBroker(t, 1) - leader0 := newMockBroker(t, 2) - leader1 := newMockBroker(t, 3) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader0.Addr(), leader0.BrokerID()) - metadataResponse.AddBroker(leader1.Addr(), leader1.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader0.BrokerID(), nil, nil, ErrNoError) - metadataResponse.AddTopicPartition("my_topic", 1, leader1.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodResponse0 := new(ProduceResponse) - prodResponse0.AddTopicPartition("my_topic", 0, ErrNoError) - leader0.Returns(prodResponse0) - - prodResponse1 := new(ProduceResponse) - 
prodResponse1.AddTopicPartition("my_topic", 1, ErrNoError) - leader1.Returns(prodResponse1) - - config := NewConfig() - config.Producer.Flush.Messages = 5 - config.Producer.Return.Successes = true - config.Producer.Partitioner = NewRoundRobinPartitioner - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - expectResults(t, producer, 10, 0) - - closeProducer(t, producer) - leader1.Close() - leader0.Close() - seedBroker.Close() -} - -func TestAsyncProducerCustomPartitioner(t *testing.T) { - seedBroker := newMockBroker(t, 1) - leader := newMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodResponse := new(ProduceResponse) - prodResponse.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodResponse) - - config := NewConfig() - config.Producer.Flush.Messages = 2 - config.Producer.Return.Successes = true - config.Producer.Partitioner = func(topic string) Partitioner { - p := make(testPartitioner) - go func() { - p.feed(0) - p <- nil - p <- nil - p <- nil - p.feed(0) - }() - return p - } - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 5; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - expectResults(t, producer, 2, 3) - - closeProducer(t, producer) - leader.Close() - seedBroker.Close() -} - -func TestAsyncProducerFailureRetry(t *testing.T) { - seedBroker := newMockBroker(t, 1) - leader1 := newMockBroker(t, 2) - leader2 := newMockBroker(t, 3) - - metadataLeader1 := new(MetadataResponse) - metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID()) - metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataLeader1) - - config := NewConfig() - config.Producer.Flush.Messages = 10 - config.Producer.Return.Successes = true - config.Producer.Retry.Backoff = 0 - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - seedBroker.Close() - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - prodNotLeader := new(ProduceResponse) - prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) - leader1.Returns(prodNotLeader) - - metadataLeader2 := new(MetadataResponse) - metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID()) - metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError) - leader1.Returns(metadataLeader2) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader2.Returns(prodSuccess) - expectResults(t, producer, 10, 0) - leader1.Close() - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - leader2.Returns(prodSuccess) - expectResults(t, producer, 10, 0) - - leader2.Close() - closeProducer(t, producer) -} - -// If a Kafka broker becomes unavailable and then returns back in service, then -// producer reconnects to it and continues sending messages. 
-func TestAsyncProducerBrokerBounce(t *testing.T) { - // Given - seedBroker := newMockBroker(t, 1) - leader := newMockBroker(t, 2) - leaderAddr := leader.Addr() - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leaderAddr, leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - - config := NewConfig() - config.Producer.Flush.Messages = 1 - config.Producer.Return.Successes = true - config.Producer.Retry.Backoff = 0 - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - leader.Returns(prodSuccess) - expectResults(t, producer, 1, 0) - - // When: a broker connection gets reset by a broker (network glitch, restart, you name it). - leader.Close() // producer should get EOF - leader = newMockBrokerAddr(t, 2, leaderAddr) // start it up again right away for giggles - seedBroker.Returns(metadataResponse) // tell it to go to broker 2 again - - // Then: a produced message goes through the new broker connection. - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - leader.Returns(prodSuccess) - expectResults(t, producer, 1, 0) - - closeProducer(t, producer) - seedBroker.Close() - leader.Close() -} - -func TestAsyncProducerBrokerBounceWithStaleMetadata(t *testing.T) { - seedBroker := newMockBroker(t, 1) - leader1 := newMockBroker(t, 2) - leader2 := newMockBroker(t, 3) - - metadataLeader1 := new(MetadataResponse) - metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID()) - metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataLeader1) - - config := NewConfig() - config.Producer.Flush.Messages = 10 - config.Producer.Return.Successes = true - config.Producer.Retry.Max = 3 - config.Producer.Retry.Backoff = 0 - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - leader1.Close() // producer should get EOF - seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down - seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down - - // ok fine, tell it to go to leader2 finally - metadataLeader2 := new(MetadataResponse) - metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID()) - metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataLeader2) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader2.Returns(prodSuccess) - expectResults(t, producer, 10, 0) - seedBroker.Close() - leader2.Close() - - closeProducer(t, producer) -} - -func TestAsyncProducerMultipleRetries(t *testing.T) { - seedBroker := newMockBroker(t, 1) - leader1 := newMockBroker(t, 2) - leader2 := newMockBroker(t, 3) - - metadataLeader1 := new(MetadataResponse) - metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID()) - metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataLeader1) - - config := 
NewConfig() - config.Producer.Flush.Messages = 10 - config.Producer.Return.Successes = true - config.Producer.Retry.Max = 4 - config.Producer.Retry.Backoff = 0 - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - prodNotLeader := new(ProduceResponse) - prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) - leader1.Returns(prodNotLeader) - - metadataLeader2 := new(MetadataResponse) - metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID()) - metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataLeader2) - leader2.Returns(prodNotLeader) - seedBroker.Returns(metadataLeader1) - leader1.Returns(prodNotLeader) - seedBroker.Returns(metadataLeader1) - leader1.Returns(prodNotLeader) - seedBroker.Returns(metadataLeader2) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader2.Returns(prodSuccess) - expectResults(t, producer, 10, 0) - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - leader2.Returns(prodSuccess) - expectResults(t, producer, 10, 0) - - seedBroker.Close() - leader1.Close() - leader2.Close() - closeProducer(t, producer) -} - -func TestAsyncProducerOutOfRetries(t *testing.T) { - t.Skip("Enable once bug #294 is fixed.") - - seedBroker := newMockBroker(t, 1) - leader := newMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - config := NewConfig() - config.Producer.Flush.Messages = 10 - config.Producer.Return.Successes = true - config.Producer.Retry.Backoff = 0 - config.Producer.Retry.Max = 0 - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - - prodNotLeader := new(ProduceResponse) - prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) - leader.Returns(prodNotLeader) - - for i := 0; i < 10; i++ { - select { - case msg := <-producer.Errors(): - if msg.Err != ErrNotLeaderForPartition { - t.Error(msg.Err) - } - case <-producer.Successes(): - t.Error("Unexpected success") - } - } - - seedBroker.Returns(metadataResponse) - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - - expectResults(t, producer, 10, 0) - - leader.Close() - seedBroker.Close() - safeClose(t, producer) -} - -func TestAsyncProducerRetryWithReferenceOpen(t *testing.T) { - seedBroker := newMockBroker(t, 1) - leader := newMockBroker(t, 2) - leaderAddr := leader.Addr() - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leaderAddr, leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError) - 
seedBroker.Returns(metadataResponse) - - config := NewConfig() - config.Producer.Return.Successes = true - config.Producer.Retry.Backoff = 0 - config.Producer.Retry.Max = 1 - config.Producer.Partitioner = NewRoundRobinPartitioner - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - // prime partition 0 - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - expectResults(t, producer, 1, 0) - - // prime partition 1 - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - prodSuccess = new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 1, ErrNoError) - leader.Returns(prodSuccess) - expectResults(t, producer, 1, 0) - - // reboot the broker (the producer will get EOF on its existing connection) - leader.Close() - leader = newMockBrokerAddr(t, 2, leaderAddr) - - // send another message on partition 0 to trigger the EOF and retry - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - - // tell partition 0 to go to that broker again - seedBroker.Returns(metadataResponse) - - // succeed this time - prodSuccess = new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - expectResults(t, producer, 1, 0) - - // shutdown - closeProducer(t, producer) - seedBroker.Close() - leader.Close() -} - -func TestAsyncProducerFlusherRetryCondition(t *testing.T) { - seedBroker := newMockBroker(t, 1) - leader := newMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - config := NewConfig() - config.Producer.Flush.Messages = 5 - config.Producer.Return.Successes = true - config.Producer.Retry.Backoff = 0 - config.Producer.Retry.Max = 1 - config.Producer.Partitioner = NewManualPartitioner - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - // prime partitions - for p := int32(0); p < 2; p++ { - for i := 0; i < 5; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: p} - } - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", p, ErrNoError) - leader.Returns(prodSuccess) - expectResults(t, producer, 5, 0) - } - - // send more messages on partition 0 - for i := 0; i < 5; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0} - } - prodNotLeader := new(ProduceResponse) - prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) - leader.Returns(prodNotLeader) - - // tell partition 0 to go to that broker again - seedBroker.Returns(metadataResponse) - - // succeed this time - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - expectResults(t, producer, 5, 0) - - // put five more through - for i := 0; i < 5; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), 
Partition: 0} - } - leader.Returns(prodSuccess) - expectResults(t, producer, 5, 0) - - // shutdown - closeProducer(t, producer) - seedBroker.Close() - leader.Close() -} - -func TestAsyncProducerRetryShutdown(t *testing.T) { - seedBroker := newMockBroker(t, 1) - leader := newMockBroker(t, 2) - - metadataLeader := new(MetadataResponse) - metadataLeader.AddBroker(leader.Addr(), leader.BrokerID()) - metadataLeader.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataLeader) - - config := NewConfig() - config.Producer.Flush.Messages = 10 - config.Producer.Return.Successes = true - config.Producer.Retry.Backoff = 0 - producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} - } - producer.AsyncClose() - time.Sleep(5 * time.Millisecond) // let the shutdown goroutine kick in - - producer.Input() <- &ProducerMessage{Topic: "FOO"} - if err := <-producer.Errors(); err.Err != ErrShuttingDown { - t.Error(err) - } - - prodNotLeader := new(ProduceResponse) - prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) - leader.Returns(prodNotLeader) - - seedBroker.Returns(metadataLeader) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - expectResults(t, producer, 10, 0) - - seedBroker.Close() - leader.Close() - - // wait for the async-closed producer to shut down fully - for err := range producer.Errors() { - t.Error(err) - } -} - -// This example shows how to use the producer while simultaneously -// reading the Errors channel to know about any failures. -func ExampleAsyncProducer_select() { - producer, err := NewAsyncProducer([]string{"localhost:9092"}, nil) - if err != nil { - panic(err) - } - - defer func() { - if err := producer.Close(); err != nil { - log.Fatalln(err) - } - }() - - // Trap SIGINT to trigger a shutdown. - signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Interrupt) - - var enqueued, errors int -ProducerLoop: - for { - select { - case producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder("testing 123")}: - enqueued++ - case err := <-producer.Errors(): - log.Println("Failed to produce message", err) - errors++ - case <-signals: - break ProducerLoop - } - } - - log.Printf("Enqueued: %d; errors: %d\n", enqueued, errors) -} - -// This example shows how to use the producer with separate goroutines -// reading from the Successes and Errors channels. Note that in order -// for the Successes channel to be populated, you have to set -// config.Producer.Return.Successes to true. -func ExampleAsyncProducer_goroutines() { - config := NewConfig() - config.Producer.Return.Successes = true - producer, err := NewAsyncProducer([]string{"localhost:9092"}, config) - if err != nil { - panic(err) - } - - // Trap SIGINT to trigger a graceful shutdown. 
- signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Interrupt) - - var ( - wg sync.WaitGroup - enqueued, successes, errors int - ) - - wg.Add(1) - go func() { - defer wg.Done() - for range producer.Successes() { - successes++ - } - }() - - wg.Add(1) - go func() { - defer wg.Done() - for err := range producer.Errors() { - log.Println(err) - errors++ - } - }() - -ProducerLoop: - for { - message := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("testing 123")} - select { - case producer.Input() <- message: - enqueued++ - - case <-signals: - producer.AsyncClose() // Trigger a shutdown of the producer. - break ProducerLoop - } - } - - wg.Wait() - - log.Printf("Successfully produced: %d; errors: %d\n", successes, errors) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/broker.go b/Godeps/_workspace/src/github.com/Shopify/sarama/broker.go deleted file mode 100644 index eb5bc0bf8..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/broker.go +++ /dev/null @@ -1,385 +0,0 @@ -package sarama - -import ( - "crypto/tls" - "fmt" - "io" - "net" - "strconv" - "sync" - "sync/atomic" - "time" -) - -// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe. -type Broker struct { - id int32 - addr string - - conf *Config - correlationID int32 - conn net.Conn - connErr error - lock sync.Mutex - opened int32 - - responses chan responsePromise - done chan bool -} - -type responsePromise struct { - correlationID int32 - packets chan []byte - errors chan error -} - -// NewBroker creates and returns a Broker targeting the given host:port address. -// This does not attempt to actually connect; you have to call Open() for that. -func NewBroker(addr string) *Broker { - return &Broker{id: -1, addr: addr} -} - -// Open tries to connect to the Broker if it is not already connected or connecting, but does not block -// waiting for the connection to complete. This means that any subsequent operations on the broker will -// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call, -// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or -// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
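For illustration, a minimal caller-side sketch of the open-then-confirm pattern this comment describes; the broker address is a placeholder and the log-based error handling is an assumption, not part of this file:

	broker := NewBroker("localhost:9092") // hypothetical address
	if err := broker.Open(nil); err != nil { // nil conf falls back to NewConfig()
		log.Fatal(err) // only ConfigurationError or ErrAlreadyConnected surface here
	}
	// Open dials in the background; Connected() waits on the connection lock,
	// so checking it right away gives the effect of a synchronous Open.
	if ok, err := broker.Connected(); err != nil {
		log.Fatal(err) // the asynchronous dial failed
	} else if ok {
		log.Println("broker connection established")
	}
	_ = broker.Close()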
-func (b *Broker) Open(conf *Config) error { - if conf == nil { - conf = NewConfig() - } - - err := conf.Validate() - if err != nil { - return err - } - - if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) { - return ErrAlreadyConnected - } - - b.lock.Lock() - - if b.conn != nil { - b.lock.Unlock() - Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, ErrAlreadyConnected) - return ErrAlreadyConnected - } - - go withRecover(func() { - defer b.lock.Unlock() - - dialer := net.Dialer{ - Timeout: conf.Net.DialTimeout, - KeepAlive: conf.Net.KeepAlive, - } - - if conf.Net.TLS.Enable { - b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config) - } else { - b.conn, b.connErr = dialer.Dial("tcp", b.addr) - } - if b.connErr != nil { - b.conn = nil - atomic.StoreInt32(&b.opened, 0) - Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr) - return - } - - b.conf = conf - b.done = make(chan bool) - b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1) - - if b.id >= 0 { - Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id) - } else { - Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr) - } - go withRecover(b.responseReceiver) - }) - - return nil -} - -// Connected returns true if the broker is connected and false otherwise. If the broker is not -// connected but it had tried to connect, the error from that connection attempt is also returned. -func (b *Broker) Connected() (bool, error) { - b.lock.Lock() - defer b.lock.Unlock() - - return b.conn != nil, b.connErr -} - -func (b *Broker) Close() error { - b.lock.Lock() - defer b.lock.Unlock() - - if b.conn == nil { - return ErrNotConnected - } - - close(b.responses) - <-b.done - - err := b.conn.Close() - - b.conn = nil - b.connErr = nil - b.done = nil - b.responses = nil - - atomic.StoreInt32(&b.opened, 0) - - if err == nil { - Logger.Printf("Closed connection to broker %s\n", b.addr) - } else { - Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) - } - - return err -} - -// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known. -func (b *Broker) ID() int32 { - return b.id -} - -// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker. 
-func (b *Broker) Addr() string { - return b.addr -} - -func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) { - response := new(MetadataResponse) - - err := b.sendAndReceive(request, response) - - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) { - response := new(ConsumerMetadataResponse) - - err := b.sendAndReceive(request, response) - - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) { - response := new(OffsetResponse) - - err := b.sendAndReceive(request, response) - - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { - var response *ProduceResponse - var err error - - if request.RequiredAcks == NoResponse { - err = b.sendAndReceive(request, nil) - } else { - response = new(ProduceResponse) - err = b.sendAndReceive(request, response) - } - - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) { - response := new(FetchResponse) - - err := b.sendAndReceive(request, response) - - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) { - response := new(OffsetCommitResponse) - - err := b.sendAndReceive(request, response) - - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) { - response := new(OffsetFetchResponse) - - err := b.sendAndReceive(request, response) - - if err != nil { - return nil, err - } - - return response, nil -} - -func (b *Broker) send(rb requestBody, promiseResponse bool) (*responsePromise, error) { - b.lock.Lock() - defer b.lock.Unlock() - - if b.conn == nil { - if b.connErr != nil { - return nil, b.connErr - } - return nil, ErrNotConnected - } - - req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} - buf, err := encode(req) - if err != nil { - return nil, err - } - - err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) - if err != nil { - return nil, err - } - - _, err = b.conn.Write(buf) - if err != nil { - return nil, err - } - b.correlationID++ - - if !promiseResponse { - return nil, nil - } - - promise := responsePromise{req.correlationID, make(chan []byte), make(chan error)} - b.responses <- promise - - return &promise, nil -} - -func (b *Broker) sendAndReceive(req requestBody, res decoder) error { - promise, err := b.send(req, res != nil) - - if err != nil { - return err - } - - if promise == nil { - return nil - } - - select { - case buf := <-promise.packets: - return decode(buf, res) - case err = <-promise.errors: - return err - } -} - -func (b *Broker) decode(pd packetDecoder) (err error) { - b.id, err = pd.getInt32() - if err != nil { - return err - } - - host, err := pd.getString() - if err != nil { - return err - } - - port, err := pd.getInt32() - if err != nil { - return err - } - - b.addr = fmt.Sprint(host, ":", port) - - return nil -} - -func (b *Broker) encode(pe packetEncoder) (err error) { - - host, portstr, err := net.SplitHostPort(b.addr) - if err != nil { - return err - } - port, err := strconv.Atoi(portstr) - if err != nil { - return err - } - - 
pe.putInt32(b.id) - - err = pe.putString(host) - if err != nil { - return err - } - - pe.putInt32(int32(port)) - - return nil -} - -func (b *Broker) responseReceiver() { - header := make([]byte, 8) - for response := range b.responses { - err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)) - if err != nil { - response.errors <- err - continue - } - - _, err = io.ReadFull(b.conn, header) - if err != nil { - response.errors <- err - continue - } - - decodedHeader := responseHeader{} - err = decode(header, &decodedHeader) - if err != nil { - response.errors <- err - continue - } - if decodedHeader.correlationID != response.correlationID { - // TODO if decoded ID < cur ID, discard until we catch up - // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response - response.errors <- PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)} - continue - } - - buf := make([]byte, decodedHeader.length-4) - _, err = io.ReadFull(b.conn, buf) - if err != nil { - // XXX: the above ReadFull call inherits the same ReadDeadline set at the top of this loop, so it may - // fail with a timeout error. If this happens, our connection is permanently toast since we will no longer - // be aligned correctly on the stream (we'll be reading garbage Kafka headers from the middle of data). - // Can we/should we fail harder in that case? - response.errors <- err - continue - } - - response.packets <- buf - } - close(b.done) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/broker_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/broker_test.go deleted file mode 100644 index df3499e49..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/broker_test.go +++ /dev/null @@ -1,177 +0,0 @@ -package sarama - -import ( - "fmt" - "testing" -) - -func ExampleBroker() error { - broker := NewBroker("localhost:9092") - err := broker.Open(nil) - if err != nil { - return err - } - - request := MetadataRequest{Topics: []string{"myTopic"}} - response, err := broker.GetMetadata(&request) - if err != nil { - _ = broker.Close() - return err - } - - fmt.Println("There are", len(response.Topics), "topics active in the cluster.") - - return broker.Close() -} - -type mockEncoder struct { - bytes []byte -} - -func (m mockEncoder) encode(pe packetEncoder) error { - return pe.putRawBytes(m.bytes) -} - -func TestBrokerAccessors(t *testing.T) { - broker := NewBroker("abc:123") - - if broker.ID() != -1 { - t.Error("New broker didn't have an ID of -1.") - } - - if broker.Addr() != "abc:123" { - t.Error("New broker didn't have the correct address") - } - - broker.id = 34 - if broker.ID() != 34 { - t.Error("Manually setting broker ID did not take effect.") - } -} - -func TestSimpleBrokerCommunication(t *testing.T) { - mb := newMockBroker(t, 0) - defer mb.Close() - - broker := NewBroker(mb.Addr()) - err := broker.Open(nil) - if err != nil { - t.Fatal(err) - } - - for _, tt := range brokerTestTable { - mb.Returns(&mockEncoder{tt.response}) - } - for _, tt := range brokerTestTable { - tt.runner(t, broker) - } - - err = broker.Close() - if err != nil { - t.Error(err) - } -} - -// We're not testing encoding/decoding here, so most of the requests/responses will be empty for simplicity's sake -var brokerTestTable = []struct { - response []byte - runner func(*testing.T, *Broker) -}{ - {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := MetadataRequest{} - 
response, err := broker.GetMetadata(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("Metadata request got no response!") - } - }}, - - {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 't', 0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := ConsumerMetadataRequest{} - response, err := broker.GetConsumerMetadata(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("Consumer Metadata request got no response!") - } - }}, - - {[]byte{}, - func(t *testing.T, broker *Broker) { - request := ProduceRequest{} - request.RequiredAcks = NoResponse - response, err := broker.Produce(&request) - if err != nil { - t.Error(err) - } - if response != nil { - t.Error("Produce request with NoResponse got a response!") - } - }}, - - {[]byte{0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := ProduceRequest{} - request.RequiredAcks = WaitForLocal - response, err := broker.Produce(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("Produce request without NoResponse got no response!") - } - }}, - - {[]byte{0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := FetchRequest{} - response, err := broker.Fetch(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("Fetch request got no response!") - } - }}, - - {[]byte{0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := OffsetFetchRequest{} - response, err := broker.FetchOffset(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("OffsetFetch request got no response!") - } - }}, - - {[]byte{0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := OffsetCommitRequest{} - response, err := broker.CommitOffset(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("OffsetCommit request got no response!") - } - }}, - - {[]byte{0x00, 0x00, 0x00, 0x00}, - func(t *testing.T, broker *Broker) { - request := OffsetRequest{} - response, err := broker.GetAvailableOffsets(&request) - if err != nil { - t.Error(err) - } - if response == nil { - t.Error("Offset request got no response!") - } - }}, -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/client.go b/Godeps/_workspace/src/github.com/Shopify/sarama/client.go deleted file mode 100644 index 974d223e2..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/client.go +++ /dev/null @@ -1,727 +0,0 @@ -package sarama - -import ( - "math/rand" - "sort" - "sync" - "time" -) - -// Client is a generic Kafka client. It manages connections to one or more Kafka brokers. -// You MUST call Close() on a client to avoid leaks, it will not be garbage-collected -// automatically when it passes out of scope. A single client can be safely shared by -// multiple concurrent Producers and Consumers. -type Client interface { - // Config returns the Config struct of the client. This struct should not be altered after it - // has been created. - Config() *Config - - // Topics returns the set of available topics as retrieved from the cluster metadata. - Topics() ([]string, error) - - // Partitions returns the sorted list of all partition IDs for the given topic. - Partitions(topic string) ([]int32, error) - - // WritablePartitions returns the sorted list of all writable partition IDs for the given topic, - // where "writable" means "having a valid leader accepting writes". 
- WritablePartitions(topic string) ([]int32, error) - - // Leader returns the broker object that is the leader of the current topic/partition, as - // determined by querying the cluster metadata. - Leader(topic string, partitionID int32) (*Broker, error) - - // Replicas returns the set of all replica IDs for the given partition. - Replicas(topic string, partitionID int32) ([]int32, error) - - // RefreshMetadata takes a list of topics and queries the cluster to refresh the - // available metadata for those topics. If no topics are provided, it will refresh metadata - // for all topics. - RefreshMetadata(topics ...string) error - - // GetOffset queries the cluster to get the most recent available offset at the given - // time on the topic/partition combination. Time should be OffsetOldest for the earliest available - // offset, OffsetNewest for the offset of the message that will be produced next, or a time. - GetOffset(topic string, partitionID int32, time int64) (int64, error) - - // Coordinator returns the coordinating broker for a consumer group. It will return a locally cached - // value if it's available. You can call RefreshCoordinator to update the cached value. - // This function only works on Kafka 0.8.2 and higher. - Coordinator(consumerGroup string) (*Broker, error) - - // RefreshCoordinator retrieves the coordinator for a consumer group and stores it in local cache. - // This function only works on Kafka 0.8.2 and higher. - RefreshCoordinator(consumerGroup string) error - - // Close shuts down all broker connections managed by this client. It is required to call this function before - // a client object passes out of scope, as it will otherwise leak memory. You must close any Producers or Consumers - // using a client before you close the client. - Close() error - - // Closed returns true if the client has already had Close called on it - Closed() bool -} - -const ( - // OffsetNewest stands for the log head offset, i.e. the offset that will be assigned to the next message - // that will be produced to the partition. You can send this to a client's GetOffset method to get this - // offset, or when calling ConsumePartition to start consuming new messages. - OffsetNewest int64 = -1 - // OffsetOldest stands for the oldest offset available on the broker for a partition. You can send this - // to a client's GetOffset method to get this offset, or when calling ConsumePartition to start consuming - // from the oldest offset that is still available on the broker. - OffsetOldest int64 = -2 -) - -type client struct { - conf *Config - closer, closed chan none // for shutting down background metadata updater - - // the broker addresses given to us through the constructor are not guaranteed to be returned in - // the cluster metadata (I *think* it only returns brokers who are currently leading partitions?) - // so we store them separately - seedBrokers []*Broker - deadSeeds []*Broker - - brokers map[int32]*Broker // maps broker ids to brokers - metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata - coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs - - // If the number of partitions is large, we can get some churn calling cachedPartitions, - // so the result is cached. It is important to update this value whenever metadata is changed - cachedPartitionsResults map[string][maxPartitionIndex][]int32 - - lock sync.RWMutex // protects access to the maps that hold cluster state. -} - -// NewClient creates a new Client. 
It connects to one of the given broker addresses -// and uses that broker to automatically fetch metadata on the rest of the Kafka cluster. If metadata cannot -// be retrieved from any of the given broker addresses, the client is not created. -func NewClient(addrs []string, conf *Config) (Client, error) { - Logger.Println("Initializing new client") - - if conf == nil { - conf = NewConfig() - } - - if err := conf.Validate(); err != nil { - return nil, err - } - - if len(addrs) < 1 { - return nil, ConfigurationError("You must provide at least one broker address") - } - - client := &client{ - conf: conf, - closer: make(chan none), - closed: make(chan none), - brokers: make(map[int32]*Broker), - metadata: make(map[string]map[int32]*PartitionMetadata), - cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32), - coordinators: make(map[string]int32), - } - - random := rand.New(rand.NewSource(time.Now().UnixNano())) - for _, index := range random.Perm(len(addrs)) { - client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index])) - } - - // do an initial fetch of all cluster metadata by specifying an empty list of topics - err := client.RefreshMetadata() - switch err { - case nil: - break - case ErrLeaderNotAvailable, ErrReplicaNotAvailable: - // indicates that maybe part of the cluster is down, but is not fatal to creating the client - Logger.Println(err) - default: - close(client.closed) // we haven't started the background updater yet, so we have to do this manually - _ = client.Close() - return nil, err - } - go withRecover(client.backgroundMetadataUpdater) - - Logger.Println("Successfully initialized new client") - - return client, nil -} - -func (client *client) Config() *Config { - return client.conf -} - -func (client *client) Close() error { - if client.Closed() { - // Chances are this is being called from a defer() and the error will go unobserved - // so we go ahead and log the event in this case.
- Logger.Printf("Close() called on already closed client") - return ErrClosedClient - } - - // shutdown and wait for the background thread before we take the lock, to avoid races - close(client.closer) - <-client.closed - - client.lock.Lock() - defer client.lock.Unlock() - Logger.Println("Closing Client") - - for _, broker := range client.brokers { - safeAsyncClose(broker) - } - - for _, broker := range client.seedBrokers { - safeAsyncClose(broker) - } - - client.brokers = nil - client.metadata = nil - - return nil -} - -func (client *client) Closed() bool { - return client.brokers == nil -} - -func (client *client) Topics() ([]string, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - client.lock.RLock() - defer client.lock.RUnlock() - - ret := make([]string, 0, len(client.metadata)) - for topic := range client.metadata { - ret = append(ret, topic) - } - - return ret, nil -} - -func (client *client) Partitions(topic string) ([]int32, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - partitions := client.cachedPartitions(topic, allPartitions) - - if len(partitions) == 0 { - err := client.RefreshMetadata(topic) - if err != nil { - return nil, err - } - partitions = client.cachedPartitions(topic, allPartitions) - } - - if partitions == nil { - return nil, ErrUnknownTopicOrPartition - } - - return partitions, nil -} - -func (client *client) WritablePartitions(topic string) ([]int32, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - partitions := client.cachedPartitions(topic, writablePartitions) - - // len==0 catches when it's nil (no such topic) and the odd case when every single - // partition is undergoing leader election simultaneously. Callers have to be able to handle - // this function returning an empty slice (which is a valid return value) but catching it - // here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers - // a metadata refresh as a nicety so callers can just try again and don't have to manually - // trigger a refresh (otherwise they'd just keep getting a stale cached copy). 
- if len(partitions) == 0 { - err := client.RefreshMetadata(topic) - if err != nil { - return nil, err - } - partitions = client.cachedPartitions(topic, writablePartitions) - } - - if partitions == nil { - return nil, ErrUnknownTopicOrPartition - } - - return partitions, nil -} - -func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - metadata := client.cachedMetadata(topic, partitionID) - - if metadata == nil { - err := client.RefreshMetadata(topic) - if err != nil { - return nil, err - } - metadata = client.cachedMetadata(topic, partitionID) - } - - if metadata == nil { - return nil, ErrUnknownTopicOrPartition - } - - if metadata.Err == ErrReplicaNotAvailable { - return nil, metadata.Err - } - return dupeAndSort(metadata.Replicas), nil -} - -func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - leader, err := client.cachedLeader(topic, partitionID) - - if leader == nil { - err := client.RefreshMetadata(topic) - if err != nil { - return nil, err - } - leader, err = client.cachedLeader(topic, partitionID) - } - - return leader, err -} - -func (client *client) RefreshMetadata(topics ...string) error { - if client.Closed() { - return ErrClosedClient - } - - // Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper - // error. This handles the case by returning an error instead of sending it - // off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310 - for _, topic := range topics { - if len(topic) == 0 { - return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return - } - } - - return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max) -} - -func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) { - if client.Closed() { - return -1, ErrClosedClient - } - - offset, err := client.getOffset(topic, partitionID, time) - - if err != nil { - if err := client.RefreshMetadata(topic); err != nil { - return -1, err - } - return client.getOffset(topic, partitionID, time) - } - - return offset, err -} - -func (client *client) Coordinator(consumerGroup string) (*Broker, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - coordinator := client.cachedCoordinator(consumerGroup) - - if coordinator == nil { - if err := client.RefreshCoordinator(consumerGroup); err != nil { - return nil, err - } - coordinator = client.cachedCoordinator(consumerGroup) - } - - if coordinator == nil { - return nil, ErrConsumerCoordinatorNotAvailable - } - - _ = coordinator.Open(client.conf) - return coordinator, nil -} - -func (client *client) RefreshCoordinator(consumerGroup string) error { - if client.Closed() { - return ErrClosedClient - } - - response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max) - if err != nil { - return err - } - - client.lock.Lock() - defer client.lock.Unlock() - client.registerBroker(response.Coordinator) - client.coordinators[consumerGroup] = response.Coordinator.ID() - return nil -} - -// private broker management helpers - -// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered -// in the brokers map. If a broker with the same ID but a different address is already registered, it -// is closed and replaced by the provided one. You must hold the write lock before calling this function.
-func (client *client) registerBroker(broker *Broker) { - if client.brokers[broker.ID()] == nil { - client.brokers[broker.ID()] = broker - Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr()) - } else if broker.Addr() != client.brokers[broker.ID()].Addr() { - safeAsyncClose(client.brokers[broker.ID()]) - client.brokers[broker.ID()] = broker - Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr()) - } -} - -// deregisterBroker removes a broker from the seedBrokers list, and if it's -// not the current seed broker, removes it from the brokers map completely. -func (client *client) deregisterBroker(broker *Broker) { - client.lock.Lock() - defer client.lock.Unlock() - - if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] { - client.deadSeeds = append(client.deadSeeds, broker) - client.seedBrokers = client.seedBrokers[1:] - } else { - // we do this so that our loop in `tryRefreshMetadata` doesn't go on forever, - // but we really shouldn't have to; once that loop is made better this case can be - // removed, and the function generally can be renamed from `deregisterBroker` to - // `nextSeedBroker` or something - Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr()) - delete(client.brokers, broker.ID()) - } -} - -func (client *client) resurrectDeadBrokers() { - client.lock.Lock() - defer client.lock.Unlock() - - Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds)) - client.seedBrokers = append(client.seedBrokers, client.deadSeeds...) - client.deadSeeds = nil -} - -func (client *client) any() *Broker { - client.lock.RLock() - defer client.lock.RUnlock() - - if len(client.seedBrokers) > 0 { - _ = client.seedBrokers[0].Open(client.conf) - return client.seedBrokers[0] - } - - // not guaranteed to be random *or* deterministic - for _, broker := range client.brokers { - _ = broker.Open(client.conf) - return broker - } - - return nil -} - -// private caching/lazy metadata helpers - -type partitionType int - -const ( - allPartitions partitionType = iota - writablePartitions - // If you add any more types, update the partition cache in update() - - // Ensure this is the last partition type value - maxPartitionIndex -) - -func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata { - client.lock.RLock() - defer client.lock.RUnlock() - - partitions := client.metadata[topic] - if partitions != nil { - return partitions[partitionID] - } - - return nil -} - -func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 { - client.lock.RLock() - defer client.lock.RUnlock() - - partitions, exists := client.cachedPartitionsResults[topic] - - if !exists { - return nil - } - return partitions[partitionSet] -} - -func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 { - partitions := client.metadata[topic] - - if partitions == nil { - return nil - } - - ret := make([]int32, 0, len(partitions)) - for _, partition := range partitions { - if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable { - continue - } - ret = append(ret, partition.ID) - } - - sort.Sort(int32Slice(ret)) - return ret -} - -func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) { - client.lock.RLock() - defer client.lock.RUnlock() - - partitions := client.metadata[topic] - if partitions != nil { - metadata, ok := partitions[partitionID] - if ok { -
if metadata.Err == ErrLeaderNotAvailable { - return nil, ErrLeaderNotAvailable - } - b := client.brokers[metadata.Leader] - if b == nil { - return nil, ErrLeaderNotAvailable - } - _ = b.Open(client.conf) - return b, nil - } - } - - return nil, ErrUnknownTopicOrPartition -} - -func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) { - broker, err := client.Leader(topic, partitionID) - if err != nil { - return -1, err - } - - request := &OffsetRequest{} - request.AddBlock(topic, partitionID, time, 1) - - response, err := broker.GetAvailableOffsets(request) - if err != nil { - _ = broker.Close() - return -1, err - } - - block := response.GetBlock(topic, partitionID) - if block == nil { - _ = broker.Close() - return -1, ErrIncompleteResponse - } - if block.Err != ErrNoError { - return -1, block.Err - } - if len(block.Offsets) != 1 { - return -1, ErrOffsetOutOfRange - } - - return block.Offsets[0], nil -} - -// core metadata update logic - -func (client *client) backgroundMetadataUpdater() { - defer close(client.closed) - - if client.conf.Metadata.RefreshFrequency == time.Duration(0) { - return - } - - ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - if err := client.RefreshMetadata(); err != nil { - Logger.Println("Client background metadata update:", err) - } - case <-client.closer: - return - } - } -} - -func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error { - retry := func(err error) error { - if attemptsRemaining > 0 { - Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining) - time.Sleep(client.conf.Metadata.Retry.Backoff) - return client.tryRefreshMetadata(topics, attemptsRemaining-1) - } - return err - } - - for broker := client.any(); broker != nil; broker = client.any() { - if len(topics) > 0 { - Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr) - } else { - Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr) - } - response, err := broker.GetMetadata(&MetadataRequest{Topics: topics}) - - switch err.(type) { - case nil: - // valid response, use it - if shouldRetry, err := client.updateMetadata(response); shouldRetry { - Logger.Println("client/metadata found some partitions to be leaderless") - return retry(err) // note: err can be nil - } else { - return err - } - - case PacketEncodingError: - // didn't even send, return the error - return err - default: - // some other error, remove that broker and try again - Logger.Println("client/metadata got error from broker while fetching metadata:", err) - _ = broker.Close() - client.deregisterBroker(broker) - } - } - - Logger.Println("client/metadata no available broker to send metadata request to") - client.resurrectDeadBrokers() - return retry(ErrOutOfBrokers) -} - -// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable -func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err error) { - client.lock.Lock() - defer client.lock.Unlock() - - // For all the brokers we received: - // - if it is a new ID, save it - // - if it is an existing ID, but the address we have is stale, discard the old one and save it - // - otherwise ignore it, replacing our existing one would just bounce the connection - for _, broker := range data.Brokers { - 
client.registerBroker(broker) - } - - for _, topic := range data.Topics { - delete(client.metadata, topic.Name) - delete(client.cachedPartitionsResults, topic.Name) - - switch topic.Err { - case ErrNoError: - break - case ErrInvalidTopic: // don't retry, don't store partial results - err = topic.Err - continue - case ErrUnknownTopicOrPartition: // retry, do not store partial partition results - err = topic.Err - retry = true - continue - case ErrLeaderNotAvailable: // retry, but store partial partition results - retry = true - break - default: // don't retry, don't store partial results - Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err) - err = topic.Err - continue - } - - client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions)) - for _, partition := range topic.Partitions { - client.metadata[topic.Name][partition.ID] = partition - if partition.Err == ErrLeaderNotAvailable { - retry = true - } - } - - var partitionCache [maxPartitionIndex][]int32 - partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions) - partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions) - client.cachedPartitionsResults[topic.Name] = partitionCache - } - - return -} - -func (client *client) cachedCoordinator(consumerGroup string) *Broker { - client.lock.RLock() - defer client.lock.RUnlock() - if coordinatorID, ok := client.coordinators[consumerGroup]; !ok { - return nil - } else { - return client.brokers[coordinatorID] - } -} - -func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*ConsumerMetadataResponse, error) { - retry := func(err error) (*ConsumerMetadataResponse, error) { - if attemptsRemaining > 0 { - Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining) - time.Sleep(client.conf.Metadata.Retry.Backoff) - return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1) - } - return nil, err - } - - for broker := client.any(); broker != nil; broker = client.any() { - Logger.Printf("client/coordinator requesting coordinator for consumer group %s from %s\n", consumerGroup, broker.Addr()) - - request := new(ConsumerMetadataRequest) - request.ConsumerGroup = consumerGroup - - response, err := broker.GetConsumerMetadata(request) - - if err != nil { - Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err) - - switch err.(type) { - case PacketEncodingError: - return nil, err - default: - _ = broker.Close() - client.deregisterBroker(broker) - continue - } - } - - switch response.Err { - case ErrNoError: - Logger.Printf("client/coordinator coordinator for consumer group %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr()) - return response, nil - - case ErrConsumerCoordinatorNotAvailable: - Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup) - - // This is very ugly, but this scenario will only happen once per cluster. - // The __consumer_offsets topic only has to be created one time. - // The number of partitions is not configurable, but partition 0 should always exist. - if _, err := client.Leader("__consumer_offsets", 0); err != nil { - Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. 
Waiting 2 seconds...\n") - time.Sleep(2 * time.Second) - } - - return retry(ErrConsumerCoordinatorNotAvailable) - default: - return nil, response.Err - } - } - - Logger.Println("client/coordinator no available broker to send consumer metadata request to") - client.resurrectDeadBrokers() - return retry(ErrOutOfBrokers) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/client_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/client_test.go deleted file mode 100644 index f84b9af31..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/client_test.go +++ /dev/null @@ -1,608 +0,0 @@ -package sarama - -import ( - "io" - "sync" - "testing" - "time" -) - -func safeClose(t testing.TB, c io.Closer) { - err := c.Close() - if err != nil { - t.Error(err) - } -} - -func TestSimpleClient(t *testing.T) { - seedBroker := newMockBroker(t, 1) - - seedBroker.Returns(new(MetadataResponse)) - - client, err := NewClient([]string{seedBroker.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - seedBroker.Close() - safeClose(t, client) -} - -func TestCachedPartitions(t *testing.T) { - seedBroker := newMockBroker(t, 1) - - replicas := []int32{3, 1, 5} - isr := []int32{5, 1} - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker("localhost:12345", 2) - metadataResponse.AddTopicPartition("my_topic", 0, 2, replicas, isr, ErrNoError) - metadataResponse.AddTopicPartition("my_topic", 1, 2, replicas, isr, ErrLeaderNotAvailable) - seedBroker.Returns(metadataResponse) - - config := NewConfig() - config.Metadata.Retry.Max = 0 - c, err := NewClient([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - client := c.(*client) - - // Verify they aren't cached the same - allP := client.cachedPartitionsResults["my_topic"][allPartitions] - writeP := client.cachedPartitionsResults["my_topic"][writablePartitions] - if len(allP) == len(writeP) { - t.Fatal("Invalid lengths!") - } - - tmp := client.cachedPartitionsResults["my_topic"] - // Verify we actually use the cache at all! 
- tmp[allPartitions] = []int32{1, 2, 3, 4} - client.cachedPartitionsResults["my_topic"] = tmp - if 4 != len(client.cachedPartitions("my_topic", allPartitions)) { - t.Fatal("Not using the cache!") - } - - seedBroker.Close() - safeClose(t, client) -} - -func TestClientDoesntCachePartitionsForTopicsWithErrors(t *testing.T) { - seedBroker := newMockBroker(t, 1) - - replicas := []int32{seedBroker.BrokerID()} - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 1, replicas[0], replicas, replicas, ErrNoError) - metadataResponse.AddTopicPartition("my_topic", 2, replicas[0], replicas, replicas, ErrNoError) - seedBroker.Returns(metadataResponse) - - config := NewConfig() - config.Metadata.Retry.Max = 0 - client, err := NewClient([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - metadataResponse = new(MetadataResponse) - metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition) - seedBroker.Returns(metadataResponse) - - partitions, err := client.Partitions("unknown") - - if err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, found", err) - } - if partitions != nil { - t.Errorf("Should return nil as partition list, found %v", partitions) - } - - // Should still use the cache of a known topic - partitions, err = client.Partitions("my_topic") - if err != nil { - t.Errorf("Expected no error, found %v", err) - } - - metadataResponse = new(MetadataResponse) - metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition) - seedBroker.Returns(metadataResponse) - - // Should not use cache for unknown topic - partitions, err = client.Partitions("unknown") - if err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, found", err) - } - if partitions != nil { - t.Errorf("Should return nil as partition list, found %v", partitions) - } - - seedBroker.Close() - safeClose(t, client) -} - -func TestClientSeedBrokers(t *testing.T) { - seedBroker := newMockBroker(t, 1) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker("localhost:12345", 2) - seedBroker.Returns(metadataResponse) - - client, err := NewClient([]string{seedBroker.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - seedBroker.Close() - safeClose(t, client) -} - -func TestClientMetadata(t *testing.T) { - seedBroker := newMockBroker(t, 1) - leader := newMockBroker(t, 5) - - replicas := []int32{3, 1, 5} - isr := []int32{5, 1} - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), replicas, isr, ErrNoError) - metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), replicas, isr, ErrLeaderNotAvailable) - seedBroker.Returns(metadataResponse) - - config := NewConfig() - config.Metadata.Retry.Max = 0 - client, err := NewClient([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - topics, err := client.Topics() - if err != nil { - t.Error(err) - } else if len(topics) != 1 || topics[0] != "my_topic" { - t.Error("Client returned incorrect topics:", topics) - } - - parts, err := client.Partitions("my_topic") - if err != nil { - t.Error(err) - } else if len(parts) != 2 || parts[0] != 0 || parts[1] != 1 { - t.Error("Client returned incorrect partitions for my_topic:", parts) - } - - parts, err = client.WritablePartitions("my_topic") - if err != nil { - t.Error(err) - } else if 
len(parts) != 1 || parts[0] != 0 { - t.Error("Client returned incorrect writable partitions for my_topic:", parts) - } - - tst, err := client.Leader("my_topic", 0) - if err != nil { - t.Error(err) - } else if tst.ID() != 5 { - t.Error("Leader for my_topic had incorrect ID.") - } - - replicas, err = client.Replicas("my_topic", 0) - if err != nil { - t.Error(err) - } else if replicas[0] != 1 { - t.Error("Incorrect (or unsorted) replica") - } else if replicas[1] != 3 { - t.Error("Incorrect (or unsorted) replica") - } else if replicas[2] != 5 { - t.Error("Incorrect (or unsorted) replica") - } - - leader.Close() - seedBroker.Close() - safeClose(t, client) -} - -func TestClientGetOffset(t *testing.T) { - seedBroker := newMockBroker(t, 1) - leader := newMockBroker(t, 2) - leaderAddr := leader.Addr() - - metadata := new(MetadataResponse) - metadata.AddTopicPartition("foo", 0, leader.BrokerID(), nil, nil, ErrNoError) - metadata.AddBroker(leaderAddr, leader.BrokerID()) - seedBroker.Returns(metadata) - - client, err := NewClient([]string{seedBroker.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - offsetResponse := new(OffsetResponse) - offsetResponse.AddTopicPartition("foo", 0, 123) - leader.Returns(offsetResponse) - - offset, err := client.GetOffset("foo", 0, OffsetNewest) - if err != nil { - t.Error(err) - } - if offset != 123 { - t.Error("Unexpected offset, got ", offset) - } - - leader.Close() - seedBroker.Returns(metadata) - - leader = newMockBrokerAddr(t, 2, leaderAddr) - offsetResponse = new(OffsetResponse) - offsetResponse.AddTopicPartition("foo", 0, 456) - leader.Returns(offsetResponse) - - offset, err = client.GetOffset("foo", 0, OffsetNewest) - if err != nil { - t.Error(err) - } - if offset != 456 { - t.Error("Unexpected offset, got ", offset) - } - - seedBroker.Close() - leader.Close() - safeClose(t, client) -} - -func TestClientReceivingUnknownTopic(t *testing.T) { - seedBroker := newMockBroker(t, 1) - - metadataResponse1 := new(MetadataResponse) - seedBroker.Returns(metadataResponse1) - - config := NewConfig() - config.Metadata.Retry.Max = 1 - config.Metadata.Retry.Backoff = 0 - client, err := NewClient([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - metadataUnknownTopic := new(MetadataResponse) - metadataUnknownTopic.AddTopic("new_topic", ErrUnknownTopicOrPartition) - seedBroker.Returns(metadataUnknownTopic) - seedBroker.Returns(metadataUnknownTopic) - - if err := client.RefreshMetadata("new_topic"); err != ErrUnknownTopicOrPartition { - t.Error("ErrUnknownTopicOrPartition expected, got", err) - } - - // If we are asking for the leader of a partition of a non-existent topic, - // we will request metadata again. 
- seedBroker.Returns(metadataUnknownTopic) - seedBroker.Returns(metadataUnknownTopic) - - if _, err = client.Leader("new_topic", 1); err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, got", err) - } - - safeClose(t, client) - seedBroker.Close() -} - -func TestClientReceivingPartialMetadata(t *testing.T) { - seedBroker := newMockBroker(t, 1) - leader := newMockBroker(t, 5) - - metadataResponse1 := new(MetadataResponse) - metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID()) - seedBroker.Returns(metadataResponse1) - - config := NewConfig() - config.Metadata.Retry.Max = 0 - client, err := NewClient([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - replicas := []int32{leader.BrokerID(), seedBroker.BrokerID()} - - metadataPartial := new(MetadataResponse) - metadataPartial.AddTopic("new_topic", ErrLeaderNotAvailable) - metadataPartial.AddTopicPartition("new_topic", 0, leader.BrokerID(), replicas, replicas, ErrNoError) - metadataPartial.AddTopicPartition("new_topic", 1, -1, replicas, []int32{}, ErrLeaderNotAvailable) - seedBroker.Returns(metadataPartial) - - if err := client.RefreshMetadata("new_topic"); err != nil { - t.Error("ErrLeaderNotAvailable should not make RefreshMetadata respond with an error") - } - - // Even though the metadata was incomplete, we should be able to get the leader of a partition - // for which we did get a useful response, without doing additional requests. - - partition0Leader, err := client.Leader("new_topic", 0) - if err != nil { - t.Error(err) - } else if partition0Leader.Addr() != leader.Addr() { - t.Error("Unexpected leader returned", partition0Leader.Addr()) - } - - // If we are asking for the leader of a partition that didn't have a leader before, - // we will do another metadata request. - - seedBroker.Returns(metadataPartial) - - // Still no leader for the partition, so asking for it should return an error. 
- _, err = client.Leader("new_topic", 1) - if err != ErrLeaderNotAvailable { - t.Error("Expected ErrLeaderNotAvailable, got", err) - } - - safeClose(t, client) - seedBroker.Close() - leader.Close() -} - -func TestClientRefreshBehaviour(t *testing.T) { - seedBroker := newMockBroker(t, 1) - leader := newMockBroker(t, 5) - - metadataResponse1 := new(MetadataResponse) - metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID()) - seedBroker.Returns(metadataResponse1) - - metadataResponse2 := new(MetadataResponse) - metadataResponse2.AddTopicPartition("my_topic", 0xb, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse2) - - client, err := NewClient([]string{seedBroker.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - parts, err := client.Partitions("my_topic") - if err != nil { - t.Error(err) - } else if len(parts) != 1 || parts[0] != 0xb { - t.Error("Client returned incorrect partitions for my_topic:", parts) - } - - tst, err := client.Leader("my_topic", 0xb) - if err != nil { - t.Error(err) - } else if tst.ID() != 5 { - t.Error("Leader for my_topic had incorrect ID.") - } - - leader.Close() - seedBroker.Close() - safeClose(t, client) -} - -func TestClientResurrectDeadSeeds(t *testing.T) { - initialSeed := newMockBroker(t, 0) - emptyMetadata := new(MetadataResponse) - initialSeed.Returns(emptyMetadata) - - conf := NewConfig() - conf.Metadata.Retry.Backoff = 0 - conf.Metadata.RefreshFrequency = 0 - c, err := NewClient([]string{initialSeed.Addr()}, conf) - if err != nil { - t.Fatal(err) - } - initialSeed.Close() - - client := c.(*client) - - seed1 := newMockBroker(t, 1) - seed2 := newMockBroker(t, 2) - seed3 := newMockBroker(t, 3) - addr1 := seed1.Addr() - addr2 := seed2.Addr() - addr3 := seed3.Addr() - - // Overwrite the seed brokers with a fixed ordering to make this test deterministic. 
- safeClose(t, client.seedBrokers[0]) - client.seedBrokers = []*Broker{NewBroker(addr1), NewBroker(addr2), NewBroker(addr3)} - client.deadSeeds = []*Broker{} - - wg := sync.WaitGroup{} - wg.Add(1) - go func() { - if err := client.RefreshMetadata(); err != nil { - t.Error(err) - } - wg.Done() - }() - seed1.Close() - seed2.Close() - - seed1 = newMockBrokerAddr(t, 1, addr1) - seed2 = newMockBrokerAddr(t, 2, addr2) - - seed3.Close() - - seed1.Close() - seed2.Returns(emptyMetadata) - - wg.Wait() - - if len(client.seedBrokers) != 2 { - t.Error("incorrect number of live seeds") - } - if len(client.deadSeeds) != 1 { - t.Error("incorrect number of dead seeds") - } - - safeClose(t, c) -} - -func TestClientCoordinatorWithConsumerOffsetsTopic(t *testing.T) { - seedBroker := newMockBroker(t, 1) - staleCoordinator := newMockBroker(t, 2) - freshCoordinator := newMockBroker(t, 3) - - replicas := []int32{staleCoordinator.BrokerID(), freshCoordinator.BrokerID()} - metadataResponse1 := new(MetadataResponse) - metadataResponse1.AddBroker(staleCoordinator.Addr(), staleCoordinator.BrokerID()) - metadataResponse1.AddBroker(freshCoordinator.Addr(), freshCoordinator.BrokerID()) - metadataResponse1.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, ErrNoError) - seedBroker.Returns(metadataResponse1) - - client, err := NewClient([]string{seedBroker.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - coordinatorResponse1 := new(ConsumerMetadataResponse) - coordinatorResponse1.Err = ErrConsumerCoordinatorNotAvailable - seedBroker.Returns(coordinatorResponse1) - - coordinatorResponse2 := new(ConsumerMetadataResponse) - coordinatorResponse2.CoordinatorID = staleCoordinator.BrokerID() - coordinatorResponse2.CoordinatorHost = "127.0.0.1" - coordinatorResponse2.CoordinatorPort = staleCoordinator.Port() - - seedBroker.Returns(coordinatorResponse2) - - broker, err := client.Coordinator("my_group") - if err != nil { - t.Error(err) - } - - if staleCoordinator.Addr() != broker.Addr() { - t.Errorf("Expected coordinator to have address %s, found %s", staleCoordinator.Addr(), broker.Addr()) - } - - if staleCoordinator.BrokerID() != broker.ID() { - t.Errorf("Expected coordinator to have ID %d, found %d", staleCoordinator.BrokerID(), broker.ID()) - } - - // Grab the cached value - broker2, err := client.Coordinator("my_group") - if err != nil { - t.Error(err) - } - - if broker2.Addr() != broker.Addr() { - t.Errorf("Expected the coordinator to be the same, but found %s vs. 
%s", broker2.Addr(), broker.Addr()) - } - - coordinatorResponse3 := new(ConsumerMetadataResponse) - coordinatorResponse3.CoordinatorID = freshCoordinator.BrokerID() - coordinatorResponse3.CoordinatorHost = "127.0.0.1" - coordinatorResponse3.CoordinatorPort = freshCoordinator.Port() - - seedBroker.Returns(coordinatorResponse3) - - // Refresh the locally cahced value because it's stale - if err := client.RefreshCoordinator("my_group"); err != nil { - t.Error(err) - } - - // Grab the fresh value - broker3, err := client.Coordinator("my_group") - if err != nil { - t.Error(err) - } - - if broker3.Addr() != freshCoordinator.Addr() { - t.Errorf("Expected the freshCoordinator to be returned, but found %s.", broker3.Addr()) - } - - freshCoordinator.Close() - staleCoordinator.Close() - seedBroker.Close() - safeClose(t, client) -} - -func TestClientCoordinatorWithoutConsumerOffsetsTopic(t *testing.T) { - seedBroker := newMockBroker(t, 1) - coordinator := newMockBroker(t, 2) - - metadataResponse1 := new(MetadataResponse) - seedBroker.Returns(metadataResponse1) - - config := NewConfig() - config.Metadata.Retry.Max = 1 - config.Metadata.Retry.Backoff = 0 - client, err := NewClient([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - coordinatorResponse1 := new(ConsumerMetadataResponse) - coordinatorResponse1.Err = ErrConsumerCoordinatorNotAvailable - seedBroker.Returns(coordinatorResponse1) - - metadataResponse2 := new(MetadataResponse) - metadataResponse2.AddTopic("__consumer_offsets", ErrUnknownTopicOrPartition) - seedBroker.Returns(metadataResponse2) - - replicas := []int32{coordinator.BrokerID()} - metadataResponse3 := new(MetadataResponse) - metadataResponse3.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, ErrNoError) - seedBroker.Returns(metadataResponse3) - - coordinatorResponse2 := new(ConsumerMetadataResponse) - coordinatorResponse2.CoordinatorID = coordinator.BrokerID() - coordinatorResponse2.CoordinatorHost = "127.0.0.1" - coordinatorResponse2.CoordinatorPort = coordinator.Port() - - seedBroker.Returns(coordinatorResponse2) - - broker, err := client.Coordinator("my_group") - if err != nil { - t.Error(err) - } - - if coordinator.Addr() != broker.Addr() { - t.Errorf("Expected coordinator to have address %s, found %s", coordinator.Addr(), broker.Addr()) - } - - if coordinator.BrokerID() != broker.ID() { - t.Errorf("Expected coordinator to have ID %d, found %d", coordinator.BrokerID(), broker.ID()) - } - - coordinator.Close() - seedBroker.Close() - safeClose(t, client) -} - -func TestClientAutorefreshShutdownRace(t *testing.T) { - seedBroker := newMockBroker(t, 1) - - metadataResponse := new(MetadataResponse) - seedBroker.Returns(metadataResponse) - - conf := NewConfig() - conf.Metadata.RefreshFrequency = 100 * time.Millisecond - client, err := NewClient([]string{seedBroker.Addr()}, conf) - if err != nil { - t.Fatal(err) - } - - // Wait for the background refresh to kick in - time.Sleep(110 * time.Millisecond) - - done := make(chan none) - go func() { - // Close the client - if err := client.Close(); err != nil { - t.Fatal(err) - } - close(done) - }() - - // Wait for the Close to kick in - time.Sleep(10 * time.Millisecond) - - // Then return some metadata to the still-running background thread - leader := newMockBroker(t, 2) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("foo", 0, leader.BrokerID(), []int32{2}, []int32{2}, ErrNoError) - seedBroker.Returns(metadataResponse) - - <-done - - 
seedBroker.Close() - - // give the update time to happen so we get a panic if it's still running (which it shouldn't) - time.Sleep(10 * time.Millisecond) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/config.go b/Godeps/_workspace/src/github.com/Shopify/sarama/config.go deleted file mode 100644 index 0fae111e9..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/config.go +++ /dev/null @@ -1,275 +0,0 @@ -package sarama - -import ( - "crypto/tls" - "time" -) - -// Config is used to pass multiple configuration options to Sarama's constructors. -type Config struct { - // Net is the namespace for network-level properties used by the Broker, and shared by the Client/Producer/Consumer. - Net struct { - MaxOpenRequests int // How many outstanding requests a connection is allowed to have before sending on it blocks (default 5). - - // All three of the below configurations are similar to the `socket.timeout.ms` setting in JVM kafka. - DialTimeout time.Duration // How long to wait for the initial connection to succeed before timing out and returning an error (default 30s). - ReadTimeout time.Duration // How long to wait for a response before timing out and returning an error (default 30s). - WriteTimeout time.Duration // How long to wait for a transmit to succeed before timing out and returning an error (default 30s). - - // NOTE: these config values have no compatibility guarantees; they may change when Kafka releases its - // official TLS support in version 0.9. - TLS struct { - Enable bool // Whether or not to use TLS when connecting to the broker (defaults to false). - Config *tls.Config // The TLS configuration to use for secure connections if enabled (defaults to nil). - } - - // KeepAlive specifies the keep-alive period for an active network connection. - // If zero, keep-alives are disabled. (default is 0: disabled). - KeepAlive time.Duration - } - - // Metadata is the namespace for metadata management properties used by the Client, and shared by the Producer/Consumer. - Metadata struct { - Retry struct { - Max int // The total number of times to retry a metadata request when the cluster is in the middle of a leader election (default 3). - Backoff time.Duration // How long to wait for leader election to occur before retrying (default 250ms). Similar to the JVM's `retry.backoff.ms`. - } - // How frequently to refresh the cluster metadata in the background. Defaults to 10 minutes. - // Set to 0 to disable. Similar to `topic.metadata.refresh.interval.ms` in the JVM version. - RefreshFrequency time.Duration - } - - // Producer is the namespace for configuration related to producing messages, used by the Producer. - Producer struct { - // The maximum permitted size of a message (defaults to 1000000). Should be set equal to or smaller than the broker's `message.max.bytes`. - MaxMessageBytes int - // The level of acknowledgement reliability needed from the broker (defaults to WaitForLocal). - // Equivalent to the `request.required.acks` setting of the JVM producer. - RequiredAcks RequiredAcks - // The maximum duration the broker will wait the receipt of the number of RequiredAcks (defaults to 10 seconds). - // This is only relevant when RequiredAcks is set to WaitForAll or a number > 1. Only supports millisecond resolution, - // nanoseconds will be truncated. Equivalent to the JVM producer's `request.timeout.ms` setting. - Timeout time.Duration - // The type of compression to use on messages (defaults to no compression). 
Similar to `compression.codec` setting of the JVM producer. - Compression CompressionCodec - // Generates partitioners for choosing the partition to send messages to (defaults to hashing the message key). - // Similar to the `partitioner.class` setting for the JVM producer. - Partitioner PartitionerConstructor - - // Return specifies what channels will be populated. If they are set to true, you must read from - // the respective channels to prevent deadlock. - Return struct { - // If enabled, successfully delivered messages will be returned on the Successes channel (default disabled). - Successes bool - - // If enabled, messages that failed to deliver will be returned on the Errors channel, including the error (default enabled). - Errors bool - } - - // The following config options control how often messages are batched up and sent to the broker. By default, - // messages are sent as fast as possible, and all messages received while the current batch is in-flight are placed - // into the subsequent batch. - Flush struct { - Bytes int // The best-effort number of bytes needed to trigger a flush. Use the global sarama.MaxRequestSize to set a hard upper limit. - Messages int // The best-effort number of messages needed to trigger a flush. Use `MaxMessages` to set a hard upper limit. - Frequency time.Duration // The best-effort frequency of flushes. Equivalent to `queue.buffering.max.ms` setting of JVM producer. - // The maximum number of messages the producer will send in a single broker request. - // Defaults to 0 for unlimited. Similar to `queue.buffering.max.messages` in the JVM producer. - MaxMessages int - } - - Retry struct { - // The total number of times to retry sending a message (default 3). - // Similar to the `message.send.max.retries` setting of the JVM producer. - Max int - // How long to wait for the cluster to settle between retries (default 100ms). - // Similar to the `retry.backoff.ms` setting of the JVM producer. - Backoff time.Duration - } - } - - // Consumer is the namespace for configuration related to consuming messages, used by the Consumer. - Consumer struct { - Retry struct { - // How long to wait after failing to read from a partition before trying again (default 2s). - Backoff time.Duration - } - - // Fetch is the namespace for controlling how many bytes are retrieved by any given request. - Fetch struct { - // The minimum number of message bytes to fetch in a request - the broker will wait until at least this many are available. - // The default is 1, as 0 causes the consumer to spin when no messages are available. Equivalent to the JVM's `fetch.min.bytes`. - Min int32 - // The default number of message bytes to fetch from the broker in each request (default 32768). This should be larger than the - // majority of your messages, or else the consumer will spend a lot of time negotiating sizes and not actually consuming. Similar - // to the JVM's `fetch.message.max.bytes`. - Default int32 - // The maximum number of message bytes to fetch from the broker in a single request. Messages larger than this will return - // ErrMessageTooLarge and will not be consumable, so you must be sure this is at least as large as your largest message. - // Defaults to 0 (no limit). Similar to the JVM's `fetch.message.max.bytes`. The global `sarama.MaxResponseSize` still applies. - Max int32 - } - // The maximum amount of time the broker will wait for Consumer.Fetch.Min bytes to become available before it - // returns fewer than that anyways. 
The default is 250ms, since 0 causes the consumer to spin when no events are available. - // 100-500ms is a reasonable range for most cases. Kafka only supports precision up to milliseconds; nanoseconds will be truncated. - // Equivalent to the JVM's `fetch.wait.max.ms`. - MaxWaitTime time.Duration - - // The maximum amount of time the consumer expects the user to take to process a message. If writing to the Messages channel - // takes longer than this, that partition will stop fetching more messages until it can proceed again. Note that, since the - // Messages channel is buffered, the actual grace time is (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms. - MaxProcessingTime time.Duration - - // Return specifies what channels will be populated. If they are set to true, you must read from - // them to prevent deadlock. - Return struct { - // If enabled, any errors that occurred while consuming are returned on the Errors channel (default disabled). - Errors bool - } - } - - // A user-provided string sent with every request to the brokers for logging, debugging, and auditing purposes. - // Defaults to "sarama", but you should probably set it to something specific to your application. - ClientID string - // The number of events to buffer in internal and external channels. This permits the producer and consumer to - // continue processing some messages in the background while user code is working, greatly improving throughput. - // Defaults to 256. - ChannelBufferSize int -} - -// NewConfig returns a new configuration instance with sane defaults. -func NewConfig() *Config { - c := &Config{} - - c.Net.MaxOpenRequests = 5 - c.Net.DialTimeout = 30 * time.Second - c.Net.ReadTimeout = 30 * time.Second - c.Net.WriteTimeout = 30 * time.Second - - c.Metadata.Retry.Max = 3 - c.Metadata.Retry.Backoff = 250 * time.Millisecond - c.Metadata.RefreshFrequency = 10 * time.Minute - - c.Producer.MaxMessageBytes = 1000000 - c.Producer.RequiredAcks = WaitForLocal - c.Producer.Timeout = 10 * time.Second - c.Producer.Partitioner = NewHashPartitioner - c.Producer.Retry.Max = 3 - c.Producer.Retry.Backoff = 100 * time.Millisecond - c.Producer.Return.Errors = true - - c.Consumer.Fetch.Min = 1 - c.Consumer.Fetch.Default = 32768 - c.Consumer.Retry.Backoff = 2 * time.Second - c.Consumer.MaxWaitTime = 250 * time.Millisecond - c.Consumer.MaxProcessingTime = 100 * time.Millisecond - c.Consumer.Return.Errors = false - - c.ChannelBufferSize = 256 - - return c -}
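// Editor's note: an illustrative sketch, not part of the deleted vendored file.
// It shows the intended use of the Config type above: start from NewConfig's
// defaults, override a few fields, and let Validate reject values that don't
// make sense. The overridden values here are arbitrary.
func exampleConfigUsage() error {
	config := NewConfig()
	config.ClientID = "my-app"                 // avoid the default "sarama" that Validate warns about
	config.Consumer.Return.Errors = true      // opt in to the consumer Errors channel
	config.Consumer.Fetch.Default = 64 * 1024 // larger fetches if your messages are large
	// Validate returns a ConfigurationError naming the first offending field.
	return config.Validate()
}

-// Validate checks a Config instance. It will return a -// ConfigurationError if the specified values don't make sense. 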
-func (c *Config) Validate() error { - // some configuration values should be warned on but not fail completely, do those first - if c.Net.TLS.Enable == false && c.Net.TLS.Config != nil { - Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.") - } - if c.Producer.RequiredAcks > 1 { - Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.") - } - if c.Producer.MaxMessageBytes >= int(MaxRequestSize) { - Logger.Println("Producer.MaxMessageBytes is larger than MaxRequestSize; it will be ignored.") - } - if c.Producer.Flush.Bytes >= int(MaxRequestSize) { - Logger.Println("Producer.Flush.Bytes is larger than MaxRequestSize; it will be ignored.") - } - if c.Producer.Timeout%time.Millisecond != 0 { - Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.") - } - if c.Consumer.MaxWaitTime < 100*time.Millisecond { - Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.") - } - if c.Consumer.MaxWaitTime%time.Millisecond != 0 { - Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.") - } - if c.ClientID == "sarama" { - Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.") - } - - // validate Net values - switch { - case c.Net.MaxOpenRequests <= 0: - return ConfigurationError("Net.MaxOpenRequests must be > 0") - case c.Net.DialTimeout <= 0: - return ConfigurationError("Net.DialTimeout must be > 0") - case c.Net.ReadTimeout <= 0: - return ConfigurationError("Net.ReadTimeout must be > 0") - case c.Net.WriteTimeout <= 0: - return ConfigurationError("Net.WriteTimeout must be > 0") - case c.Net.KeepAlive < 0: - return ConfigurationError("Net.KeepAlive must be >= 0") - } - - // validate the Metadata values - switch { - case c.Metadata.Retry.Max < 0: - return ConfigurationError("Metadata.Retry.Max must be >= 0") - case c.Metadata.Retry.Backoff < 0: - return ConfigurationError("Metadata.Retry.Backoff must be >= 0") - case c.Metadata.RefreshFrequency < 0: - return ConfigurationError("Metadata.RefreshFrequency must be >= 0") - } - - // validate the Producer values - switch { - case c.Producer.MaxMessageBytes <= 0: - return ConfigurationError("Producer.MaxMessageBytes must be > 0") - case c.Producer.RequiredAcks < -1: - return ConfigurationError("Producer.RequiredAcks must be >= -1") - case c.Producer.Timeout <= 0: - return ConfigurationError("Producer.Timeout must be > 0") - case c.Producer.Partitioner == nil: - return ConfigurationError("Producer.Partitioner must not be nil") - case c.Producer.Flush.Bytes < 0: - return ConfigurationError("Producer.Flush.Bytes must be >= 0") - case c.Producer.Flush.Messages < 0: - return ConfigurationError("Producer.Flush.Messages must be >= 0") - case c.Producer.Flush.Frequency < 0: - return ConfigurationError("Producer.Flush.Frequency must be >= 0") - case c.Producer.Flush.MaxMessages < 0: - return ConfigurationError("Producer.Flush.MaxMessages must be >= 0") - case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages: - return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set") - case c.Producer.Retry.Max < 0: - return ConfigurationError("Producer.Retry.Max must be >= 0") - case c.Producer.Retry.Backoff < 0: - return ConfigurationError("Producer.Retry.Backoff must be >= 0") - } - - // 
validate the Consumer values - switch { - case c.Consumer.Fetch.Min <= 0: - return ConfigurationError("Consumer.Fetch.Min must be > 0") - case c.Consumer.Fetch.Default <= 0: - return ConfigurationError("Consumer.Fetch.Default must be > 0") - case c.Consumer.Fetch.Max < 0: - return ConfigurationError("Consumer.Fetch.Max must be >= 0") - case c.Consumer.MaxWaitTime < 1*time.Millisecond: - return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms") - case c.Consumer.MaxProcessingTime <= 0: - return ConfigurationError("Consumer.MaxProcessingTime must be > 0") - case c.Consumer.Retry.Backoff < 0: - return ConfigurationError("Consumer.Retry.Backoff must be >= 0") - } - - // validate misc shared values - switch { - case c.ChannelBufferSize < 0: - return ConfigurationError("ChannelBufferSize must be >= 0") - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/config_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/config_test.go deleted file mode 100644 index 255281a65..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/config_test.go +++ /dev/null @@ -1,10 +0,0 @@ -package sarama - -import "testing" - -func TestDefaultConfigValidates(t *testing.T) { - config := NewConfig() - if err := config.Validate(); err != nil { - t.Error(err) - } -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/consumer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer.go deleted file mode 100644 index 43ce3b21b..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/consumer.go +++ /dev/null @@ -1,676 +0,0 @@ -package sarama - -import ( - "errors" - "fmt" - "sync" - "sync/atomic" - "time" -) - -// ConsumerMessage encapsulates a Kafka message returned by the consumer. -type ConsumerMessage struct { - Key, Value []byte - Topic string - Partition int32 - Offset int64 -} - -// ConsumerError is what is provided to the user when an error occurs. -// It wraps an error and includes the topic and partition. -type ConsumerError struct { - Topic string - Partition int32 - Err error -} - -func (ce ConsumerError) Error() string { - return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err) -} - -// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface. -// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors -// when stopping. -type ConsumerErrors []*ConsumerError - -func (ce ConsumerErrors) Error() string { - return fmt.Sprintf("kafka: %d errors while consuming", len(ce)) -} -
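// Editor's note: an illustrative sketch, not part of the deleted vendored file.
// It ties together the types defined in this file: build a Consumer, attach a
// PartitionConsumer at the newest offset, read some messages, and shut down in
// the documented order (partition consumer first, then the consumer). The
// broker address and topic are placeholders.
func exampleConsumeOnePartition() error {
	consumer, err := NewConsumer([]string{"localhost:9092"}, nil) // nil config uses NewConfig defaults
	if err != nil {
		return err
	}
	defer consumer.Close()

	pc, err := consumer.ConsumePartition("my_topic", 0, OffsetNewest)
	if err != nil {
		return err
	}
	defer pc.Close()

	for i := 0; i < 10; i++ {
		msg := <-pc.Messages() // blocks until the broker delivers a message
		_ = msg.Value          // process the payload here
	}
	return nil
}

-// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close() -// on a consumer to avoid leaks; it will not be garbage-collected automatically when it passes out of -// scope. -// -// Sarama's Consumer type does not currently support automatic consumer group rebalancing and offset tracking; -// however, the https://github.com/wvanbergen/kafka library builds on Sarama to add this support. We plan -// to properly integrate this functionality at a later date. -type Consumer interface { - - // Topics returns the set of available topics as retrieved from the cluster metadata. - // This method is the same as Client.Topics(), and is provided for convenience. - Topics() ([]string, error) - - // Partitions returns the sorted list of all partition IDs for the given topic. - // This method is the same as Client.Partitions(), and is provided for convenience. 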
- Partitions(topic string) ([]int32, error) - - // ConsumePartition creates a PartitionConsumer on the given topic/partition with the given offset. It will - // return an error if this Consumer is already consuming on the given topic/partition. Offset can be a - // literal offset, or OffsetNewest or OffsetOldest - ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) - - // Close shuts down the consumer. It must be called after all child PartitionConsumers have already been closed. - Close() error -} - -type consumer struct { - client Client - conf *Config - ownClient bool - - lock sync.Mutex - children map[string]map[int32]*partitionConsumer - brokerConsumers map[*Broker]*brokerConsumer -} - -// NewConsumer creates a new consumer using the given broker addresses and configuration. -func NewConsumer(addrs []string, config *Config) (Consumer, error) { - client, err := NewClient(addrs, config) - if err != nil { - return nil, err - } - - c, err := NewConsumerFromClient(client) - if err != nil { - return nil, err - } - c.(*consumer).ownClient = true - return c, nil -} - -// NewConsumerFromClient creates a new consumer using the given client. It is still -// necessary to call Close() on the underlying client when shutting down this consumer. -func NewConsumerFromClient(client Client) (Consumer, error) { - // Check that we are not dealing with a closed Client before processing any other arguments - if client.Closed() { - return nil, ErrClosedClient - } - - c := &consumer{ - client: client, - conf: client.Config(), - children: make(map[string]map[int32]*partitionConsumer), - brokerConsumers: make(map[*Broker]*brokerConsumer), - } - - return c, nil -} - -func (c *consumer) Close() error { - if c.ownClient { - return c.client.Close() - } - return nil -} - -func (c *consumer) Topics() ([]string, error) { - return c.client.Topics() -} - -func (c *consumer) Partitions(topic string) ([]int32, error) { - return c.client.Partitions(topic) -} - -func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) { - child := &partitionConsumer{ - consumer: c, - conf: c.conf, - topic: topic, - partition: partition, - messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize), - errors: make(chan *ConsumerError, c.conf.ChannelBufferSize), - feeder: make(chan *FetchResponse, 1), - trigger: make(chan none, 1), - dying: make(chan none), - fetchSize: c.conf.Consumer.Fetch.Default, - } - - if err := child.chooseStartingOffset(offset); err != nil { - return nil, err - } - - var leader *Broker - var err error - if leader, err = c.client.Leader(child.topic, child.partition); err != nil { - return nil, err - } - - if err := c.addChild(child); err != nil { - return nil, err - } - - go withRecover(child.dispatcher) - go withRecover(child.responseFeeder) - - child.broker = c.refBrokerConsumer(leader) - child.broker.input <- child - - return child, nil -} - -func (c *consumer) addChild(child *partitionConsumer) error { - c.lock.Lock() - defer c.lock.Unlock() - - topicChildren := c.children[child.topic] - if topicChildren == nil { - topicChildren = make(map[int32]*partitionConsumer) - c.children[child.topic] = topicChildren - } - - if topicChildren[child.partition] != nil { - return ConfigurationError("That topic/partition is already being consumed") - } - - topicChildren[child.partition] = child - return nil -} - -func (c *consumer) removeChild(child *partitionConsumer) { - c.lock.Lock() - defer c.lock.Unlock() - - 
delete(c.children[child.topic], child.partition) -} - -func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer { - c.lock.Lock() - defer c.lock.Unlock() - - bc := c.brokerConsumers[broker] - if bc == nil { - bc = c.newBrokerConsumer(broker) - c.brokerConsumers[broker] = bc - } - - bc.refs++ - - return bc -} - -func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) { - c.lock.Lock() - defer c.lock.Unlock() - - brokerWorker.refs-- - - if brokerWorker.refs == 0 { - close(brokerWorker.input) - if c.brokerConsumers[brokerWorker.broker] == brokerWorker { - delete(c.brokerConsumers, brokerWorker.broker) - } - } -} - -func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) { - c.lock.Lock() - defer c.lock.Unlock() - - delete(c.brokerConsumers, brokerWorker.broker) -} -
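// Editor's note: an illustrative sketch, not part of the deleted vendored file.
// The PartitionConsumer documentation below recommends reading the Errors
// channel via a select statement when Consumer.Return.Errors is enabled; this
// is the shape of that loop. The done channel stands in for the caller's own
// shutdown signal.
func exampleDrainWithErrors(pc PartitionConsumer, done <-chan struct{}) {
	for {
		select {
		case msg := <-pc.Messages():
			_ = msg // process the message
		case err := <-pc.Errors():
			Logger.Println(err) // or feed it into your own error handling
		case <-done:
			return
		}
	}
}

-// PartitionConsumer - -// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call Close() -// or AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically -// when it passes out of scope. -// -// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range -// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported -// as out of range by the brokers. In this case you should decide what you want to do (try a different offset, -// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying. -// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set -// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement -// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches. -type PartitionConsumer interface { - - // AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, - // after which you should wait until the 'messages' and 'errors' channels are drained. - // It is required to call this function (or Close) before a consumer object passes out of scope, - // as it will otherwise leak memory. You must call this before calling Close on the underlying - // client. - AsyncClose() - - // Close stops the PartitionConsumer from fetching messages. It is required to call this function - // (or AsyncClose) before a consumer object passes out of scope, as it will otherwise leak memory. You must - // call this before calling Close on the underlying client. - Close() error - - // Messages returns the read channel for the messages that are returned by the broker. - Messages() <-chan *ConsumerMessage - - // Errors returns a read channel of errors that occurred during consuming, if enabled. By default, - // errors are logged and not returned over this channel. If you want to implement any custom error - // handling, set your config's Consumer.Return.Errors setting to true, and read from this channel. - Errors() <-chan *ConsumerError - - // HighWaterMarkOffset returns the high water mark offset of the partition, i.e. the offset that will - // be used for the next message that will be produced. You can use this to determine how far behind - // the processing is. 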
- HighWaterMarkOffset() int64 -} - -type partitionConsumer struct { - consumer *consumer - conf *Config - topic string - partition int32 - - broker *brokerConsumer - messages chan *ConsumerMessage - errors chan *ConsumerError - feeder chan *FetchResponse - - trigger, dying chan none - responseResult error - - fetchSize int32 - offset int64 - highWaterMarkOffset int64 -} - -var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing - -func (child *partitionConsumer) sendError(err error) { - cErr := &ConsumerError{ - Topic: child.topic, - Partition: child.partition, - Err: err, - } - - if child.conf.Consumer.Return.Errors { - child.errors <- cErr - } else { - Logger.Println(cErr) - } -} - -func (child *partitionConsumer) dispatcher() { - for _ = range child.trigger { - select { - case <-child.dying: - close(child.trigger) - case <-time.After(child.conf.Consumer.Retry.Backoff): - if child.broker != nil { - child.consumer.unrefBrokerConsumer(child.broker) - child.broker = nil - } - - Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition) - if err := child.dispatch(); err != nil { - child.sendError(err) - child.trigger <- none{} - } - } - } - - if child.broker != nil { - child.consumer.unrefBrokerConsumer(child.broker) - } - child.consumer.removeChild(child) - close(child.feeder) -} - -func (child *partitionConsumer) dispatch() error { - if err := child.consumer.client.RefreshMetadata(child.topic); err != nil { - return err - } - - var leader *Broker - var err error - if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil { - return err - } - - child.broker = child.consumer.refBrokerConsumer(leader) - - child.broker.input <- child - - return nil -} - -func (child *partitionConsumer) chooseStartingOffset(offset int64) error { - newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest) - if err != nil { - return err - } - oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest) - if err != nil { - return err - } - - switch { - case offset == OffsetNewest: - child.offset = newestOffset - case offset == OffsetOldest: - child.offset = oldestOffset - case offset >= oldestOffset && offset <= newestOffset: - child.offset = offset - default: - return ErrOffsetOutOfRange - } - - return nil -} - -func (child *partitionConsumer) Messages() <-chan *ConsumerMessage { - return child.messages -} - -func (child *partitionConsumer) Errors() <-chan *ConsumerError { - return child.errors -} - -func (child *partitionConsumer) AsyncClose() { - // this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes - // the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and - // 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will - // also just close itself) - close(child.dying) -} - -func (child *partitionConsumer) Close() error { - child.AsyncClose() - - go withRecover(func() { - for _ = range child.messages { - // drain - } - }) - - var errors ConsumerErrors - for err := range child.errors { - errors = append(errors, err) - } - - if len(errors) > 0 { - return errors - } - return nil -} - -func (child *partitionConsumer) HighWaterMarkOffset() int64 { - return atomic.LoadInt64(&child.highWaterMarkOffset) -} - -func (child *partitionConsumer) responseFeeder() { - var msgs []*ConsumerMessage - -feederLoop: - for response := 
range child.feeder { - msgs, child.responseResult = child.parseResponse(response) - - for i, msg := range msgs { - select { - case child.messages <- msg: - case <-time.After(child.conf.Consumer.MaxProcessingTime): - child.responseResult = errTimedOut - child.broker.acks.Done() - for _, msg = range msgs[i:] { - child.messages <- msg - } - child.broker.input <- child - continue feederLoop - } - } - - child.broker.acks.Done() - } - - close(child.messages) - close(child.errors) -} - -func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) { - block := response.GetBlock(child.topic, child.partition) - if block == nil { - return nil, ErrIncompleteResponse - } - - if block.Err != ErrNoError { - return nil, block.Err - } - - if len(block.MsgSet.Messages) == 0 { - // We got no messages. If we got a trailing one then we need to ask for more data. - // Otherwise we just poll again and wait for one to be produced... - if block.MsgSet.PartialTrailingMessage { - if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max { - // we can't ask for more data, we've hit the configured limit - child.sendError(ErrMessageTooLarge) - child.offset++ // skip this one so we can keep processing future messages - } else { - child.fetchSize *= 2 - if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max { - child.fetchSize = child.conf.Consumer.Fetch.Max - } - } - } - - return nil, nil - } - - // we got messages, reset our fetch size in case it was increased for a previous request - child.fetchSize = child.conf.Consumer.Fetch.Default - atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset) - - incomplete := false - prelude := true - var messages []*ConsumerMessage - for _, msgBlock := range block.MsgSet.Messages { - - for _, msg := range msgBlock.Messages() { - if prelude && msg.Offset < child.offset { - continue - } - prelude = false - - if msg.Offset >= child.offset { - messages = append(messages, &ConsumerMessage{ - Topic: child.topic, - Partition: child.partition, - Key: msg.Msg.Key, - Value: msg.Msg.Value, - Offset: msg.Offset, - }) - child.offset = msg.Offset + 1 - } else { - incomplete = true - } - } - - } - - if incomplete || len(messages) == 0 { - return nil, ErrIncompleteResponse - } - return messages, nil -} - -// brokerConsumer - -type brokerConsumer struct { - consumer *consumer - broker *Broker - input chan *partitionConsumer - newSubscriptions chan []*partitionConsumer - wait chan none - subscriptions map[*partitionConsumer]none - acks sync.WaitGroup - refs int -} - -func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer { - bc := &brokerConsumer{ - consumer: c, - broker: broker, - input: make(chan *partitionConsumer), - newSubscriptions: make(chan []*partitionConsumer), - wait: make(chan none), - subscriptions: make(map[*partitionConsumer]none), - refs: 0, - } - - go withRecover(bc.subscriptionManager) - go withRecover(bc.subscriptionConsumer) - - return bc -} - -func (bc *brokerConsumer) subscriptionManager() { - var buffer []*partitionConsumer - - // The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer - // goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks - // up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give - // it nil if no new subscriptions are available. 
We also write to `wait` only when new subscriptions are available, - so the main goroutine can block waiting for work if it has none. - for { - if len(buffer) > 0 { - select { - case event, ok := <-bc.input: - if !ok { - goto done - } - buffer = append(buffer, event) - case bc.newSubscriptions <- buffer: - buffer = nil - case bc.wait <- none{}: - } - } else { - select { - case event, ok := <-bc.input: - if !ok { - goto done - } - buffer = append(buffer, event) - case bc.newSubscriptions <- nil: - } - } - } - -done: - close(bc.wait) - if len(buffer) > 0 { - bc.newSubscriptions <- buffer - } - close(bc.newSubscriptions) -} - -func (bc *brokerConsumer) subscriptionConsumer() { - <-bc.wait // wait for our first piece of work - - // the subscriptionManager ensures we will get nil right away if no new subscriptions are available - for newSubscriptions := range bc.newSubscriptions { - for _, child := range newSubscriptions { - bc.subscriptions[child] = none{} - Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition) - } - - if len(bc.subscriptions) == 0 { - // We're about to be shut down or we're about to receive more subscriptions. - // Either way, the signal just hasn't propagated to our goroutine yet. - <-bc.wait - continue - } - - response, err := bc.fetchNewMessages() - - if err != nil { - Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err) - bc.abort(err) - return - } - - bc.acks.Add(len(bc.subscriptions)) - for child := range bc.subscriptions { - child.feeder <- response - } - bc.acks.Wait() - bc.handleResponses() - } -} - -func (bc *brokerConsumer) handleResponses() { - // handles the response codes left for us by our subscriptions, and abandons ones that have been closed - for child := range bc.subscriptions { - select { - case <-child.dying: - Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition) - close(child.trigger) - delete(bc.subscriptions, child) - default: - result := child.responseResult - child.responseResult = nil - - switch result { - case nil: - break - case errTimedOut: - Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n", - bc.broker.ID(), child.topic, child.partition) - delete(bc.subscriptions, child) - case ErrOffsetOutOfRange: - // there's no point in retrying this; it will just fail the same way again - // shut it down and force the user to choose what to do - child.sendError(result) - Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result) - close(child.trigger) - delete(bc.subscriptions, child) - case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable: - // not an error, but does need redispatching - Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", - bc.broker.ID(), child.topic, child.partition, result) - child.trigger <- none{} - delete(bc.subscriptions, child) - default: - // dunno, tell the user and try redispatching - child.sendError(result) - Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", - bc.broker.ID(), child.topic, child.partition, result) - child.trigger <- none{} - delete(bc.subscriptions, child) - } - } - } -}
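// Editor's note: an illustrative sketch, not part of the deleted vendored file.
// subscriptionConsumer above fans one FetchResponse out to every subscribed
// partitionConsumer and then blocks on bc.acks (a sync.WaitGroup) until each
// responseFeeder has called Done, so the next fetch is only issued once every
// subscriber has finished with the previous response. The same pattern in
// miniature:
func fanOutAndWait(feeders []chan *FetchResponse, response *FetchResponse, acks *sync.WaitGroup) {
	acks.Add(len(feeders))
	for _, feeder := range feeders {
		feeder <- response // the receiving goroutine calls acks.Done() when finished
	}
	acks.Wait() // every subscriber has now processed the response
}

-func (bc *brokerConsumer) abort(err error) { - bc.consumer.abandonBrokerConsumer(bc) - _ = bc.broker.Close() // we don't care about the error this might return, we already have one 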
- - for child := range bc.subscriptions { - child.sendError(err) - child.trigger <- none{} - } - - for newSubscription := range bc.newSubscriptions { - for _, child := range newSubscription { - child.sendError(err) - child.trigger <- none{} - } - } -} - -func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { - request := &FetchRequest{ - MinBytes: bc.consumer.conf.Consumer.Fetch.Min, - MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond), - } - - for child := range bc.subscriptions { - request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize) - } - - return bc.broker.Fetch(request) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_request.go deleted file mode 100644 index 9b8fcd74e..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_request.go +++ /dev/null @@ -1,22 +0,0 @@ -package sarama - -type ConsumerMetadataRequest struct { - ConsumerGroup string -} - -func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error { - return pe.putString(r.ConsumerGroup) -} - -func (r *ConsumerMetadataRequest) decode(pd packetDecoder) (err error) { - r.ConsumerGroup, err = pd.getString() - return err -} - -func (r *ConsumerMetadataRequest) key() int16 { - return 10 -} - -func (r *ConsumerMetadataRequest) version() int16 { - return 0 -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_request_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_request_test.go deleted file mode 100644 index 4509631a0..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_request_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package sarama - -import "testing" - -var ( - consumerMetadataRequestEmpty = []byte{ - 0x00, 0x00} - - consumerMetadataRequestString = []byte{ - 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r'} -) - -func TestConsumerMetadataRequest(t *testing.T) { - request := new(ConsumerMetadataRequest) - testRequest(t, "empty string", request, consumerMetadataRequestEmpty) - - request.ConsumerGroup = "foobar" - testRequest(t, "with string", request, consumerMetadataRequestString) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_response.go deleted file mode 100644 index d6b5614b4..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_response.go +++ /dev/null @@ -1,73 +0,0 @@ -package sarama - -import ( - "net" - "strconv" -) - -type ConsumerMetadataResponse struct { - Err KError - Coordinator *Broker - CoordinatorID int32 // deprecated: use Coordinator.ID() - CoordinatorHost string // deprecated: use Coordinator.Addr() - CoordinatorPort int32 // deprecated: use Coordinator.Addr() -} - -func (r *ConsumerMetadataResponse) decode(pd packetDecoder) (err error) { - tmp, err := pd.getInt16() - if err != nil { - return err - } - r.Err = KError(tmp) - - coordinator := new(Broker) - if err := coordinator.decode(pd); err != nil { - return err - } - if coordinator.addr == ":0" { - return nil - } - r.Coordinator = coordinator - - // this can all go away in 2.0, but we have to fill in deprecated fields to maintain - // backwards compatibility - host, portstr, err := net.SplitHostPort(r.Coordinator.Addr()) - if err != nil { - return err - } - port, err := strconv.ParseInt(portstr, 10, 32) - if err != nil { - return err 
- } - r.CoordinatorID = r.Coordinator.ID() - r.CoordinatorHost = host - r.CoordinatorPort = int32(port) - - return nil -} - -func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error { - pe.putInt16(int16(r.Err)) - if r.Coordinator != nil { - host, portstr, err := net.SplitHostPort(r.Coordinator.Addr()) - if err != nil { - return err - } - port, err := strconv.ParseInt(portstr, 10, 32) - if err != nil { - return err - } - pe.putInt32(r.Coordinator.ID()) - if err := pe.putString(host); err != nil { - return err - } - pe.putInt32(int32(port)) - return nil - } - pe.putInt32(r.CoordinatorID) - if err := pe.putString(r.CoordinatorHost); err != nil { - return err - } - pe.putInt32(r.CoordinatorPort) - return nil -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_response_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_response_test.go deleted file mode 100644 index b748784d7..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_metadata_response_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package sarama - -import "testing" - -var ( - consumerMetadataResponseError = []byte{ - 0x00, 0x0E, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00} - - consumerMetadataResponseSuccess = []byte{ - 0x00, 0x00, - 0x00, 0x00, 0x00, 0xAB, - 0x00, 0x03, 'f', 'o', 'o', - 0x00, 0x00, 0xCC, 0xDD} -) - -func TestConsumerMetadataResponseError(t *testing.T) { - response := ConsumerMetadataResponse{Err: ErrOffsetsLoadInProgress} - testResponse(t, "error", &response, consumerMetadataResponseError) -} - -func TestConsumerMetadataResponseSuccess(t *testing.T) { - broker := NewBroker("foo:52445") - broker.id = 0xAB - response := ConsumerMetadataResponse{ - Coordinator: broker, - CoordinatorID: 0xAB, - CoordinatorHost: "foo", - CoordinatorPort: 0xCCDD, - Err: ErrNoError, - } - testResponse(t, "success", &response, consumerMetadataResponseSuccess) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_test.go deleted file mode 100644 index df3af07ff..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/consumer_test.go +++ /dev/null @@ -1,844 +0,0 @@ -package sarama - -import ( - "sync" - "testing" - "time" -) - -var testMsg = StringEncoder("Foo") - -// If a particular offset is provided then messages are consumed starting from -// that offset. -func TestConsumerOffsetManual(t *testing.T) { - // Given - broker0 := newMockBroker(t, 0) - - mockFetchResponse := newMockFetchResponse(t, 1) - for i := 0; i < 10; i++ { - mockFetchResponse.SetMessage("my_topic", 0, int64(i+1234), testMsg) - } - - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": newMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": newMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 0). - SetOffset("my_topic", 0, OffsetNewest, 2345), - "FetchRequest": mockFetchResponse, - }) - - // When - master, err := NewConsumer([]string{broker0.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - consumer, err := master.ConsumePartition("my_topic", 0, 1234) - if err != nil { - t.Fatal(err) - } - - // Then: messages starting from offset 1234 are consumed. 
- for i := 0; i < 10; i++ { - select { - case message := <-consumer.Messages(): - assertMessageOffset(t, message, int64(i+1234)) - case err := <-consumer.Errors(): - t.Error(err) - } - } - - safeClose(t, consumer) - safeClose(t, master) - broker0.Close() -} - -// If `OffsetNewest` is passed as the initial offset then the first consumed -// message indeed corresponds to the offset that the broker claims to be the -// newest in its metadata response. -func TestConsumerOffsetNewest(t *testing.T) { - // Given - broker0 := newMockBroker(t, 0) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": newMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": newMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetNewest, 10). - SetOffset("my_topic", 0, OffsetOldest, 7), - "FetchRequest": newMockFetchResponse(t, 1). - SetMessage("my_topic", 0, 9, testMsg). - SetMessage("my_topic", 0, 10, testMsg). - SetMessage("my_topic", 0, 11, testMsg). - SetHighWaterMark("my_topic", 0, 14), - }) - - master, err := NewConsumer([]string{broker0.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - // When - consumer, err := master.ConsumePartition("my_topic", 0, OffsetNewest) - if err != nil { - t.Fatal(err) - } - - // Then - assertMessageOffset(t, <-consumer.Messages(), 10) - if hwmo := consumer.HighWaterMarkOffset(); hwmo != 14 { - t.Errorf("Expected high water mark offset 14, found %d", hwmo) - } - - safeClose(t, consumer) - safeClose(t, master) - broker0.Close() -} - -// It is possible to close a partition consumer and then create a new one for -// the same topic/partition. -func TestConsumerRecreate(t *testing.T) { - // Given - broker0 := newMockBroker(t, 0) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": newMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": newMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 0). - SetOffset("my_topic", 0, OffsetNewest, 1000), - "FetchRequest": newMockFetchResponse(t, 1). - SetMessage("my_topic", 0, 10, testMsg), - }) - - c, err := NewConsumer([]string{broker0.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - pc, err := c.ConsumePartition("my_topic", 0, 10) - if err != nil { - t.Fatal(err) - } - assertMessageOffset(t, <-pc.Messages(), 10) - - // When - safeClose(t, pc) - pc, err = c.ConsumePartition("my_topic", 0, 10) - if err != nil { - t.Fatal(err) - } - - // Then - assertMessageOffset(t, <-pc.Messages(), 10) - - safeClose(t, pc) - safeClose(t, c) - broker0.Close() -} - -// An attempt to consume the same partition twice should fail. -func TestConsumerDuplicate(t *testing.T) { - // Given - broker0 := newMockBroker(t, 0) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": newMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": newMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 0). 
- SetOffset("my_topic", 0, OffsetNewest, 1000), - "FetchRequest": newMockFetchResponse(t, 1), - }) - - config := NewConfig() - config.ChannelBufferSize = 0 - c, err := NewConsumer([]string{broker0.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - pc1, err := c.ConsumePartition("my_topic", 0, 0) - if err != nil { - t.Fatal(err) - } - - // When - pc2, err := c.ConsumePartition("my_topic", 0, 0) - - // Then - if pc2 != nil || err != ConfigurationError("That topic/partition is already being consumed") { - t.Fatal("A partition cannot be consumed twice at the same time") - } - - safeClose(t, pc1) - safeClose(t, c) - broker0.Close() -} - -// If consumer fails to refresh metadata it keeps retrying with frequency -// specified by `Config.Consumer.Retry.Backoff`. -func TestConsumerLeaderRefreshError(t *testing.T) { - // Given - broker0 := newMockBroker(t, 100) - - // Stage 1: my_topic/0 served by broker0 - Logger.Printf(" STAGE 1") - - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": newMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": newMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 123). - SetOffset("my_topic", 0, OffsetNewest, 1000), - "FetchRequest": newMockFetchResponse(t, 1). - SetMessage("my_topic", 0, 123, testMsg), - }) - - config := NewConfig() - config.Net.ReadTimeout = 100 * time.Millisecond - config.Consumer.Retry.Backoff = 200 * time.Millisecond - config.Consumer.Return.Errors = true - config.Metadata.Retry.Max = 0 - c, err := NewConsumer([]string{broker0.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest) - if err != nil { - t.Fatal(err) - } - - assertMessageOffset(t, <-pc.Messages(), 123) - - // Stage 2: broker0 says that it is no longer the leader for my_topic/0, - // but the requests to retrieve metadata fail with network timeout. - Logger.Printf(" STAGE 2") - - fetchResponse2 := &FetchResponse{} - fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition) - - broker0.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": newMockWrapper(fetchResponse2), - }) - - if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers { - t.Errorf("Unexpected error: %v", consErr.Err) - } - - // Stage 3: finally the metadata returned by broker0 tells that broker1 is - // a new leader for my_topic/0. Consumption resumes. - - Logger.Printf(" STAGE 3") - - broker1 := newMockBroker(t, 101) - - broker1.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": newMockFetchResponse(t, 1). - SetMessage("my_topic", 0, 124, testMsg), - }) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": newMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetBroker(broker1.Addr(), broker1.BrokerID()). - SetLeader("my_topic", 0, broker1.BrokerID()), - }) - - assertMessageOffset(t, <-pc.Messages(), 124) - - safeClose(t, pc) - safeClose(t, c) - broker1.Close() - broker0.Close() -} - -func TestConsumerInvalidTopic(t *testing.T) { - // Given - broker0 := newMockBroker(t, 100) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": newMockMetadataResponse(t). 
- SetBroker(broker0.Addr(), broker0.BrokerID()), - }) - - c, err := NewConsumer([]string{broker0.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - // When - pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest) - - // Then - if pc != nil || err != ErrUnknownTopicOrPartition { - t.Errorf("Should fail with, err=%v", err) - } - - safeClose(t, c) - broker0.Close() -} - -// Nothing bad happens if a partition consumer that has no leader assigned at -// the moment is closed. -func TestConsumerClosePartitionWithoutLeader(t *testing.T) { - // Given - broker0 := newMockBroker(t, 100) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": newMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": newMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 123). - SetOffset("my_topic", 0, OffsetNewest, 1000), - "FetchRequest": newMockFetchResponse(t, 1). - SetMessage("my_topic", 0, 123, testMsg), - }) - - config := NewConfig() - config.Net.ReadTimeout = 100 * time.Millisecond - config.Consumer.Retry.Backoff = 100 * time.Millisecond - config.Consumer.Return.Errors = true - config.Metadata.Retry.Max = 0 - c, err := NewConsumer([]string{broker0.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest) - if err != nil { - t.Fatal(err) - } - - assertMessageOffset(t, <-pc.Messages(), 123) - - // broker0 says that it is no longer the leader for my_topic/0, but the - // requests to retrieve metadata fail with network timeout. - fetchResponse2 := &FetchResponse{} - fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition) - - broker0.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": newMockWrapper(fetchResponse2), - }) - - // When - if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers { - t.Errorf("Unexpected error: %v", consErr.Err) - } - - // Then: the partition consumer can be closed without any problem. - safeClose(t, pc) - safeClose(t, c) - broker0.Close() -} - -// If the initial offset passed on partition consumer creation is out of the -// actual offset range for the partition, then the partition consumer stops -// immediately closing its output channels. -func TestConsumerShutsDownOutOfRange(t *testing.T) { - // Given - broker0 := newMockBroker(t, 0) - broker0.SetHandler(func(req *request) (res encoder) { - switch reqBody := req.body.(type) { - case *MetadataRequest: - return newMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()). - For(reqBody) - case *OffsetRequest: - return newMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetNewest, 1234). - SetOffset("my_topic", 0, OffsetOldest, 7). - For(reqBody) - case *FetchRequest: - fetchResponse := new(FetchResponse) - fetchResponse.AddError("my_topic", 0, ErrOffsetOutOfRange) - return fetchResponse - } - return nil - }) - - master, err := NewConsumer([]string{broker0.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - // When - consumer, err := master.ConsumePartition("my_topic", 0, 101) - if err != nil { - t.Fatal(err) - } - - // Then: consumer should shut down closing its messages and errors channels. 
- if _, ok := <-consumer.Messages(); ok { - t.Error("Expected the consumer to shut down") - } - safeClose(t, consumer) - - safeClose(t, master) - broker0.Close() -} - -// If a fetch response contains messages with offsets that are smaller than -// requested, then such messages are ignored. -func TestConsumerExtraOffsets(t *testing.T) { - // Given - broker0 := newMockBroker(t, 0) - called := 0 - broker0.SetHandler(func(req *request) (res encoder) { - switch req.body.(type) { - case *MetadataRequest: - return newMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()).For(req.body) - case *OffsetRequest: - return newMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetNewest, 1234). - SetOffset("my_topic", 0, OffsetOldest, 0).For(req.body) - case *FetchRequest: - fetchResponse := &FetchResponse{} - called++ - if called > 1 { - fetchResponse.AddError("my_topic", 0, ErrNoError) - return fetchResponse - } - fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 1) - fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 2) - fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 3) - fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 4) - return fetchResponse - } - return nil - }) - - master, err := NewConsumer([]string{broker0.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - // When - consumer, err := master.ConsumePartition("my_topic", 0, 3) - if err != nil { - t.Fatal(err) - } - - // Then: messages with offsets 1 and 2 are not returned even though they - // are present in the response. - assertMessageOffset(t, <-consumer.Messages(), 3) - assertMessageOffset(t, <-consumer.Messages(), 4) - - safeClose(t, consumer) - safeClose(t, master) - broker0.Close() -} - -// It is fine if offsets of fetched messages are not sequential (although they -// must still be strictly increasing!). -func TestConsumerNonSequentialOffsets(t *testing.T) { - // Given - broker0 := newMockBroker(t, 0) - called := 0 - broker0.SetHandler(func(req *request) (res encoder) { - switch req.body.(type) { - case *MetadataRequest: - return newMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()).For(req.body) - case *OffsetRequest: - return newMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetNewest, 1234). - SetOffset("my_topic", 0, OffsetOldest, 0).For(req.body) - case *FetchRequest: - called++ - fetchResponse := &FetchResponse{} - if called > 1 { - fetchResponse.AddError("my_topic", 0, ErrNoError) - return fetchResponse - } - fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 5) - fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 7) - fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 11) - return fetchResponse - } - return nil - }) - - master, err := NewConsumer([]string{broker0.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - // When - consumer, err := master.ConsumePartition("my_topic", 0, 3) - if err != nil { - t.Fatal(err) - } - - // Then: the messages with offsets 5, 7, and 11 are all returned even though - // the offsets are not consecutive. - assertMessageOffset(t, <-consumer.Messages(), 5) - assertMessageOffset(t, <-consumer.Messages(), 7) - assertMessageOffset(t, <-consumer.Messages(), 11) - - safeClose(t, consumer) - safeClose(t, master) - broker0.Close() -} - -// If leadership for a partition changes then the consumer resolves the new -// leader and switches to it. 
-func TestConsumerRebalancingMultiplePartitions(t *testing.T) { - // initial setup - seedBroker := newMockBroker(t, 10) - leader0 := newMockBroker(t, 0) - leader1 := newMockBroker(t, 1) - - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": newMockMetadataResponse(t). - SetBroker(leader0.Addr(), leader0.BrokerID()). - SetBroker(leader1.Addr(), leader1.BrokerID()). - SetLeader("my_topic", 0, leader0.BrokerID()). - SetLeader("my_topic", 1, leader1.BrokerID()), - }) - - mockOffsetResponse1 := newMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 0). - SetOffset("my_topic", 0, OffsetNewest, 1000). - SetOffset("my_topic", 1, OffsetOldest, 0). - SetOffset("my_topic", 1, OffsetNewest, 1000) - leader0.SetHandlerByMap(map[string]MockResponse{ - "OffsetRequest": mockOffsetResponse1, - "FetchRequest": newMockFetchResponse(t, 1), - }) - leader1.SetHandlerByMap(map[string]MockResponse{ - "OffsetRequest": mockOffsetResponse1, - "FetchRequest": newMockFetchResponse(t, 1), - }) - - // launch test goroutines - config := NewConfig() - config.Consumer.Retry.Backoff = 50 - master, err := NewConsumer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - // we expect to end up (eventually) consuming exactly ten messages on each partition - var wg sync.WaitGroup - for i := int32(0); i < 2; i++ { - consumer, err := master.ConsumePartition("my_topic", i, 0) - if err != nil { - t.Error(err) - } - - go func(c PartitionConsumer) { - for err := range c.Errors() { - t.Error(err) - } - }(consumer) - - wg.Add(1) - go func(partition int32, c PartitionConsumer) { - for i := 0; i < 10; i++ { - message := <-consumer.Messages() - if message.Offset != int64(i) { - t.Error("Incorrect message offset!", i, partition, message.Offset) - } - if message.Partition != partition { - t.Error("Incorrect message partition!") - } - } - safeClose(t, consumer) - wg.Done() - }(i, consumer) - } - - time.Sleep(50 * time.Millisecond) - Logger.Printf(" STAGE 1") - // Stage 1: - // * my_topic/0 -> leader0 serves 4 messages - // * my_topic/1 -> leader1 serves 0 messages - - mockFetchResponse := newMockFetchResponse(t, 1) - for i := 0; i < 4; i++ { - mockFetchResponse.SetMessage("my_topic", 0, int64(i), testMsg) - } - leader0.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": mockFetchResponse, - }) - - time.Sleep(50 * time.Millisecond) - Logger.Printf(" STAGE 2") - // Stage 2: - // * leader0 says that it is no longer serving my_topic/0 - // * seedBroker tells that leader1 is serving my_topic/0 now - - // seed broker tells that the new partition 0 leader is leader1 - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": newMockMetadataResponse(t). - SetLeader("my_topic", 0, leader1.BrokerID()). 
- SetLeader("my_topic", 1, leader1.BrokerID()), - }) - - // leader0 says no longer leader of partition 0 - leader0.SetHandler(func(req *request) (res encoder) { - switch req.body.(type) { - case *FetchRequest: - fetchResponse := new(FetchResponse) - fetchResponse.AddError("my_topic", 0, ErrNotLeaderForPartition) - return fetchResponse - } - return nil - }) - - time.Sleep(50 * time.Millisecond) - Logger.Printf(" STAGE 3") - // Stage 3: - // * my_topic/0 -> leader1 serves 3 messages - // * my_topic/1 -> leader1 server 8 messages - - // leader1 provides 3 message on partition 0, and 8 messages on partition 1 - mockFetchResponse2 := newMockFetchResponse(t, 2) - for i := 4; i < 7; i++ { - mockFetchResponse2.SetMessage("my_topic", 0, int64(i), testMsg) - } - for i := 0; i < 8; i++ { - mockFetchResponse2.SetMessage("my_topic", 1, int64(i), testMsg) - } - leader1.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": mockFetchResponse2, - }) - - time.Sleep(50 * time.Millisecond) - Logger.Printf(" STAGE 4") - // Stage 4: - // * my_topic/0 -> leader1 serves 3 messages - // * my_topic/1 -> leader1 tells that it is no longer the leader - // * seedBroker tells that leader0 is a new leader for my_topic/1 - - // metadata assigns 0 to leader1 and 1 to leader0 - seedBroker.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": newMockMetadataResponse(t). - SetLeader("my_topic", 0, leader1.BrokerID()). - SetLeader("my_topic", 1, leader0.BrokerID()), - }) - - // leader1 provides three more messages on partition0, says no longer leader of partition1 - mockFetchResponse3 := newMockFetchResponse(t, 3). - SetMessage("my_topic", 0, int64(7), testMsg). - SetMessage("my_topic", 0, int64(8), testMsg). - SetMessage("my_topic", 0, int64(9), testMsg) - leader1.SetHandler(func(req *request) (res encoder) { - switch reqBody := req.body.(type) { - case *FetchRequest: - res := mockFetchResponse3.For(reqBody).(*FetchResponse) - res.AddError("my_topic", 1, ErrNotLeaderForPartition) - return res - - } - return nil - }) - - // leader0 provides two messages on partition 1 - mockFetchResponse4 := newMockFetchResponse(t, 2) - for i := 8; i < 10; i++ { - mockFetchResponse4.SetMessage("my_topic", 1, int64(i), testMsg) - } - leader0.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": mockFetchResponse4, - }) - - wg.Wait() - safeClose(t, master) - leader1.Close() - leader0.Close() - seedBroker.Close() -} - -// When two partitions have the same broker as the leader, if one partition -// consumer channel buffer is full then that does not affect the ability to -// read messages by the other consumer. -func TestConsumerInterleavedClose(t *testing.T) { - // Given - broker0 := newMockBroker(t, 0) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": newMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()). - SetLeader("my_topic", 1, broker0.BrokerID()), - "OffsetRequest": newMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 1000). - SetOffset("my_topic", 0, OffsetNewest, 1100). - SetOffset("my_topic", 1, OffsetOldest, 2000). - SetOffset("my_topic", 1, OffsetNewest, 2100), - "FetchRequest": newMockFetchResponse(t, 1). - SetMessage("my_topic", 0, 1000, testMsg). - SetMessage("my_topic", 0, 1001, testMsg). - SetMessage("my_topic", 0, 1002, testMsg). 
- SetMessage("my_topic", 1, 2000, testMsg), - }) - - config := NewConfig() - config.ChannelBufferSize = 0 - master, err := NewConsumer([]string{broker0.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - c0, err := master.ConsumePartition("my_topic", 0, 1000) - if err != nil { - t.Fatal(err) - } - - c1, err := master.ConsumePartition("my_topic", 1, 2000) - if err != nil { - t.Fatal(err) - } - - // When/Then: we can read from partition 0 even if nobody reads from partition 1 - assertMessageOffset(t, <-c0.Messages(), 1000) - assertMessageOffset(t, <-c0.Messages(), 1001) - assertMessageOffset(t, <-c0.Messages(), 1002) - - safeClose(t, c1) - safeClose(t, c0) - safeClose(t, master) - broker0.Close() -} - -func TestConsumerBounceWithReferenceOpen(t *testing.T) { - broker0 := newMockBroker(t, 0) - broker0Addr := broker0.Addr() - broker1 := newMockBroker(t, 1) - - mockMetadataResponse := newMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetBroker(broker1.Addr(), broker1.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()). - SetLeader("my_topic", 1, broker1.BrokerID()) - - mockOffsetResponse := newMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetOldest, 1000). - SetOffset("my_topic", 0, OffsetNewest, 1100). - SetOffset("my_topic", 1, OffsetOldest, 2000). - SetOffset("my_topic", 1, OffsetNewest, 2100) - - mockFetchResponse := newMockFetchResponse(t, 1) - for i := 0; i < 10; i++ { - mockFetchResponse.SetMessage("my_topic", 0, int64(1000+i), testMsg) - mockFetchResponse.SetMessage("my_topic", 1, int64(2000+i), testMsg) - } - - broker0.SetHandlerByMap(map[string]MockResponse{ - "OffsetRequest": mockOffsetResponse, - "FetchRequest": mockFetchResponse, - }) - broker1.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": mockMetadataResponse, - "OffsetRequest": mockOffsetResponse, - "FetchRequest": mockFetchResponse, - }) - - config := NewConfig() - config.Consumer.Return.Errors = true - config.Consumer.Retry.Backoff = 100 * time.Millisecond - config.ChannelBufferSize = 1 - master, err := NewConsumer([]string{broker1.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - c0, err := master.ConsumePartition("my_topic", 0, 1000) - if err != nil { - t.Fatal(err) - } - - c1, err := master.ConsumePartition("my_topic", 1, 2000) - if err != nil { - t.Fatal(err) - } - - // read messages from both partition to make sure that both brokers operate - // normally. - assertMessageOffset(t, <-c0.Messages(), 1000) - assertMessageOffset(t, <-c1.Messages(), 2000) - - // Simulate broker shutdown. Note that metadata response does not change, - // that is the leadership does not move to another broker. So partition - // consumer will keep retrying to restore the connection with the broker. - broker0.Close() - - // Make sure that while the partition/0 leader is down, consumer/partition/1 - // is capable of pulling messages from broker1. - for i := 1; i < 7; i++ { - offset := (<-c1.Messages()).Offset - if offset != int64(2000+i) { - t.Errorf("Expected offset %d from consumer/partition/1", int64(2000+i)) - } - } - - // Bring broker0 back to service. - broker0 = newMockBrokerAddr(t, 0, broker0Addr) - broker0.SetHandlerByMap(map[string]MockResponse{ - "FetchRequest": mockFetchResponse, - }) - - // Read the rest of messages from both partitions. 
- for i := 7; i < 10; i++ { - assertMessageOffset(t, <-c1.Messages(), int64(2000+i)) - } - for i := 1; i < 10; i++ { - assertMessageOffset(t, <-c0.Messages(), int64(1000+i)) - } - - select { - case <-c0.Errors(): - default: - t.Errorf("Partition consumer should have detected broker restart") - } - - safeClose(t, c1) - safeClose(t, c0) - safeClose(t, master) - broker0.Close() - broker1.Close() -} - -func TestConsumerOffsetOutOfRange(t *testing.T) { - // Given - broker0 := newMockBroker(t, 2) - broker0.SetHandlerByMap(map[string]MockResponse{ - "MetadataRequest": newMockMetadataResponse(t). - SetBroker(broker0.Addr(), broker0.BrokerID()). - SetLeader("my_topic", 0, broker0.BrokerID()), - "OffsetRequest": newMockOffsetResponse(t). - SetOffset("my_topic", 0, OffsetNewest, 1234). - SetOffset("my_topic", 0, OffsetOldest, 2345), - }) - - master, err := NewConsumer([]string{broker0.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - // When/Then - if _, err := master.ConsumePartition("my_topic", 0, 0); err != ErrOffsetOutOfRange { - t.Fatal("Should return ErrOffsetOutOfRange, got:", err) - } - if _, err := master.ConsumePartition("my_topic", 0, 3456); err != ErrOffsetOutOfRange { - t.Fatal("Should return ErrOffsetOutOfRange, got:", err) - } - if _, err := master.ConsumePartition("my_topic", 0, -3); err != ErrOffsetOutOfRange { - t.Fatal("Should return ErrOffsetOutOfRange, got:", err) - } - - safeClose(t, master) - broker0.Close() -} - -func assertMessageOffset(t *testing.T, msg *ConsumerMessage, expectedOffset int64) { - if msg.Offset != expectedOffset { - t.Errorf("Incorrect message offset: expected=%d, actual=%d", expectedOffset, msg.Offset) - } -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/crc32_field.go b/Godeps/_workspace/src/github.com/Shopify/sarama/crc32_field.go deleted file mode 100644 index f4fde18ad..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/crc32_field.go +++ /dev/null @@ -1,35 +0,0 @@ -package sarama - -import ( - "encoding/binary" - "hash/crc32" -) - -// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s. -type crc32Field struct { - startOffset int -} - -func (c *crc32Field) saveOffset(in int) { - c.startOffset = in -} - -func (c *crc32Field) reserveLength() int { - return 4 -} - -func (c *crc32Field) run(curOffset int, buf []byte) error { - crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset]) - binary.BigEndian.PutUint32(buf[c.startOffset:], crc) - return nil -} - -func (c *crc32Field) check(curOffset int, buf []byte) error { - crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset]) - - if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) { - return PacketDecodingError{"CRC didn't match"} - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/encoder_decoder.go b/Godeps/_workspace/src/github.com/Shopify/sarama/encoder_decoder.go deleted file mode 100644 index b91efaa0e..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/encoder_decoder.go +++ /dev/null @@ -1,62 +0,0 @@ -package sarama - -import "fmt" - -// Encoder is the interface that wraps the basic Encode method. -// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules. -type encoder interface { - encode(pe packetEncoder) error -} - -// Encode takes an Encoder and turns it into bytes. 
-func encode(e encoder) ([]byte, error) { - if e == nil { - return nil, nil - } - - var prepEnc prepEncoder - var realEnc realEncoder - - err := e.encode(&prepEnc) - if err != nil { - return nil, err - } - - if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) { - return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)} - } - - realEnc.raw = make([]byte, prepEnc.length) - err = e.encode(&realEnc) - if err != nil { - return nil, err - } - - return realEnc.raw, nil -} - -// Decoder is the interface that wraps the basic Decode method. -// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules. -type decoder interface { - decode(pd packetDecoder) error -} - -// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes, -// interpreted using Kafka's encoding rules. -func decode(buf []byte, in decoder) error { - if buf == nil { - return nil - } - - helper := realDecoder{raw: buf} - err := in.decode(&helper) - if err != nil { - return err - } - - if helper.off != len(buf) { - return PacketDecodingError{"invalid length"} - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/errors.go b/Godeps/_workspace/src/github.com/Shopify/sarama/errors.go deleted file mode 100644 index 70f2b9bfd..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/errors.go +++ /dev/null @@ -1,146 +0,0 @@ -package sarama - -import ( - "errors" - "fmt" -) - -// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored -// or otherwise failed to respond. -var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)") - -// ErrClosedClient is the error returned when a method is called on a client that has been closed. -var ErrClosedClient = errors.New("kafka: tried to use a client that was closed") - -// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does -// not contain the expected information. -var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks") - -// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index -// (meaning one outside of the range [0...numPartitions-1]). -var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index") - -// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting. -var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated") - -// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected. -var ErrNotConnected = errors.New("kafka: broker not connected") - -// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected -// when requesting messages, since as an optimization the server is allowed to return a partial message at the end -// of the message set. -var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected") - -// ErrShuttingDown is returned when a producer receives a message during shutdown. 
-var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down") - -// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max -var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max") - -// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example, -// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that. -type PacketEncodingError struct { - Info string -} - -func (err PacketEncodingError) Error() string { - return fmt.Sprintf("kafka: error encoding packet: %s", err.Info) -} - -// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response. -// This can be a bad CRC or length field, or any other invalid value. -type PacketDecodingError struct { - Info string -} - -func (err PacketDecodingError) Error() string { - return fmt.Sprintf("kafka: error decoding packet: %s", err.Info) -} - -// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer) -// when the specified configuration is invalid. -type ConfigurationError string - -func (err ConfigurationError) Error() string { - return "kafka: invalid configuration (" + string(err) + ")" -} - -// KError is the type of error that can be returned directly by the Kafka broker. -// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes -type KError int16 - -// Numeric error codes returned by the Kafka server. -const ( - ErrNoError KError = 0 - ErrUnknown KError = -1 - ErrOffsetOutOfRange KError = 1 - ErrInvalidMessage KError = 2 - ErrUnknownTopicOrPartition KError = 3 - ErrInvalidMessageSize KError = 4 - ErrLeaderNotAvailable KError = 5 - ErrNotLeaderForPartition KError = 6 - ErrRequestTimedOut KError = 7 - ErrBrokerNotAvailable KError = 8 - ErrReplicaNotAvailable KError = 9 - ErrMessageSizeTooLarge KError = 10 - ErrStaleControllerEpochCode KError = 11 - ErrOffsetMetadataTooLarge KError = 12 - ErrOffsetsLoadInProgress KError = 14 - ErrConsumerCoordinatorNotAvailable KError = 15 - ErrNotCoordinatorForConsumer KError = 16 - ErrInvalidTopic KError = 17 - ErrMessageSetSizeTooLarge KError = 18 - ErrNotEnoughReplicas KError = 19 - ErrNotEnoughReplicasAfterAppend KError = 20 -) - -func (err KError) Error() string { - // Error messages stolen/adapted from - // https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol - switch err { - case ErrNoError: - return "kafka server: Not an error, why are you printing me?" - case ErrUnknown: - return "kafka server: Unexpected (unknown?) server error." - case ErrOffsetOutOfRange: - return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition." - case ErrInvalidMessage: - return "kafka server: Message contents does not match its CRC." - case ErrUnknownTopicOrPartition: - return "kafka server: Request was for a topic or partition that does not exist on this broker." - case ErrInvalidMessageSize: - return "kafka server: The message has a negative size." - case ErrLeaderNotAvailable: - return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes." 
- case ErrNotLeaderForPartition: - return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date." - case ErrRequestTimedOut: - return "kafka server: Request exceeded the user-specified time limit in the request." - case ErrBrokerNotAvailable: - return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!" - case ErrReplicaNotAvailable: - return "kafka server: Replica information not available, one or more brokers are down." - case ErrMessageSizeTooLarge: - return "kafka server: Message was too large, server rejected it to avoid allocation error." - case ErrStaleControllerEpochCode: - return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)." - case ErrOffsetMetadataTooLarge: - return "kafka server: Specified a string larger than the configured maximum for offset metadata." - case ErrOffsetsLoadInProgress: - return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition." - case ErrConsumerCoordinatorNotAvailable: - return "kafka server: Offset's topic has not yet been created." - case ErrNotCoordinatorForConsumer: - return "kafka server: Request was for a consumer group that is not coordinated by this broker." - case ErrInvalidTopic: - return "kafka server: The request attempted to perform an operation on an invalid topic." - case ErrMessageSetSizeTooLarge: - return "kafka server: The request included a message batch larger than the configured segment size on the server." - case ErrNotEnoughReplicas: - return "kafka server: Messages are rejected since there are fewer in-sync replicas than required." - case ErrNotEnoughReplicasAfterAppend: - return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required." - } - - return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/examples/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/README.md deleted file mode 100644 index b6588051e..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/examples/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# Sarama examples - -This folder contains example applications to demonstrate the use of Sarama. For code snippet examples on how to use the different types in Sarama, see [Sarama's API documentation on godoc.org](https://godoc.org/github.com/Shopify/sarama). - -In these examples, we use `github.com/Shopify/sarama` as the import path. We do this to ensure all the examples are up to date with the latest changes in Sarama. For your own applications, you may want to use `gopkg.in/Shopify/sarama.v1` to lock into a stable API version. - -#### HTTP server - -[http_server](./http_server) is a simple HTTP server that uses the sync producer to produce data as part of the request handling cycle, and the async producer to maintain an access log. It also uses the [mocks subpackage](https://godoc.org/github.com/Shopify/sarama/mocks) to test both.
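Before the full example below, here is a minimal sketch contrasting the two producer styles the README describes. This is not part of the vendored code; the broker list and the topics "important" and "access_log" are illustrative, mirroring the example that follows.

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

// produceBoth is a hypothetical helper contrasting the two producer APIs.
func produceBoth(brokers []string) {
	// The SyncProducer blocks until the broker acknowledges the message,
	// so the caller learns the partition and offset (or the error) directly.
	syncProducer, err := sarama.NewSyncProducer(brokers, sarama.NewConfig())
	if err != nil {
		log.Fatalln(err)
	}
	defer syncProducer.Close()

	partition, offset, err := syncProducer.SendMessage(&sarama.ProducerMessage{
		Topic: "important",
		Value: sarama.StringEncoder("payload"),
	})
	log.Println("stored at", partition, offset, "err:", err)

	// The AsyncProducer is fire-and-forget: messages go in on Input(),
	// and failures come back asynchronously on Errors().
	asyncProducer, err := sarama.NewAsyncProducer(brokers, sarama.NewConfig())
	if err != nil {
		log.Fatalln(err)
	}
	defer asyncProducer.Close()

	go func() {
		for err := range asyncProducer.Errors() {
			log.Println("produce failed:", err)
		}
	}()
	asyncProducer.Input() <- &sarama.ProducerMessage{
		Topic: "access_log",
		Value: sarama.StringEncoder("entry"),
	}
}
```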
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/.gitignore b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/.gitignore deleted file mode 100644 index 9f6ed425f..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -http_server -http_server.test diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/README.md deleted file mode 100644 index 5ff2bc253..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# HTTP server example - -This HTTP server example shows you how to use the AsyncProducer and SyncProducer, and how to test them using mocks. The server simply sends the data of the HTTP request's query string to Kafka, and sends a 200 result if that succeeds. For every request, it will also send an access log entry to Kafka in the background. - -If you need to know whether a message was successfully sent to the Kafka cluster before you can send your HTTP response, using the `SyncProducer` is probably the simplest way to achieve this. If you don't care, e.g. for the access log, using the `AsyncProducer` will let you fire and forget. You can send the HTTP response while the message is being produced in the background. - -One important thing to note is that both the `SyncProducer` and `AsyncProducer` are **thread-safe**. Go's `http.Server` handles requests concurrently in different goroutines, but you can use a single producer safely. This actually yields efficiency gains, as the producer is able to batch messages from concurrent requests together.
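To make that thread-safety point concrete, here is a minimal sketch (the `newHandler` helper and the topic are hypothetical, not part of this example) of one `AsyncProducer` shared by every request goroutine:

```go
package main

import (
	"net/http"

	"github.com/Shopify/sarama"
)

// newHandler returns an http.Handler backed by a single shared
// AsyncProducer. Because the producer is thread-safe, every concurrent
// request goroutine can send on Input(), and sarama batches messages
// from all of them together.
func newHandler(producer sarama.AsyncProducer) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		producer.Input() <- &sarama.ProducerMessage{
			Topic: "access_log", // illustrative topic
			Key:   sarama.StringEncoder(r.RemoteAddr),
			Value: sarama.StringEncoder(r.URL.RawQuery),
		}
		w.WriteHeader(http.StatusOK)
	})
}
```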
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/http_server.go b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/http_server.go deleted file mode 100644 index 03e47b6b2..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/http_server.go +++ /dev/null @@ -1,246 +0,0 @@ -package main - -import ( - "github.com/Shopify/sarama" - - "crypto/tls" - "crypto/x509" - "encoding/json" - "flag" - "fmt" - "io/ioutil" - "log" - "net/http" - "os" - "strings" - "time" -) - -var ( - addr = flag.String("addr", ":8080", "The address to bind to") - brokers = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The Kafka brokers to connect to, as a comma separated list") - verbose = flag.Bool("verbose", false, "Turn on Sarama logging") - certFile = flag.String("certificate", "", "The optional certificate file for client authentication") - keyFile = flag.String("key", "", "The optional key file for client authentication") - caFile = flag.String("ca", "", "The optional certificate authority file for TLS client authentication") - verifySsl = flag.Bool("verify", false, "Optional verify ssl certificates chain") -) - -func main() { - flag.Parse() - - if *verbose { - sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags) - } - - if *brokers == "" { - flag.PrintDefaults() - os.Exit(1) - } - - brokerList := strings.Split(*brokers, ",") - log.Printf("Kafka brokers: %s", strings.Join(brokerList, ", ")) - - server := &Server{ - DataCollector: newDataCollector(brokerList), - AccessLogProducer: newAccessLogProducer(brokerList), - } - defer func() { - if err := server.Close(); err != nil { - log.Println("Failed to close server", err) - } - }() - - log.Fatal(server.Run(*addr)) -} - -func createTlsConfiguration() (t *tls.Config) { - if *certFile != "" && *keyFile != "" && *caFile != "" { - cert, err := tls.LoadX509KeyPair(*certFile, *keyFile) - if err != nil { - log.Fatal(err) - } - - caCert, err := ioutil.ReadFile(*caFile) - if err != nil { - log.Fatal(err) - } - - caCertPool := x509.NewCertPool() - caCertPool.AppendCertsFromPEM(caCert) - - t = &tls.Config{ - Certificates: []tls.Certificate{cert}, - RootCAs: caCertPool, - InsecureSkipVerify: *verifySsl, - } - } - // will be nil by default if nothing is provided - return t -} - -type Server struct { - DataCollector sarama.SyncProducer - AccessLogProducer sarama.AsyncProducer -} - -func (s *Server) Close() error { - if err := s.DataCollector.Close(); err != nil { - log.Println("Failed to shut down data collector cleanly", err) - } - - if err := s.AccessLogProducer.Close(); err != nil { - log.Println("Failed to shut down access log producer cleanly", err) - } - - return nil -} - -func (s *Server) Handler() http.Handler { - return s.withAccessLog(s.collectQueryStringData()) -} - -func (s *Server) Run(addr string) error { - httpServer := &http.Server{ - Addr: addr, - Handler: s.Handler(), - } - - log.Printf("Listening for requests on %s...\n", addr) - return httpServer.ListenAndServe() -} - -func (s *Server) collectQueryStringData() http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/" { - http.NotFound(w, r) - return - } - - // We are not setting a message key, which means that all messages will - // be distributed randomly over the different partitions. 
- partition, offset, err := s.DataCollector.SendMessage(&sarama.ProducerMessage{ - Topic: "important", - Value: sarama.StringEncoder(r.URL.RawQuery), - }) - - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - fmt.Fprintf(w, "Failed to store your data: %s", err) - } else { - // The tuple (topic, partition, offset) can be used as a unique identifier - // for a message in a Kafka cluster. - fmt.Fprintf(w, "Your data is stored with unique identifier important/%d/%d", partition, offset) - } - }) -} - -type accessLogEntry struct { - Method string `json:"method"` - Host string `json:"host"` - Path string `json:"path"` - IP string `json:"ip"` - ResponseTime float64 `json:"response_time"` - - encoded []byte - err error -} - -func (ale *accessLogEntry) ensureEncoded() { - if ale.encoded == nil && ale.err == nil { - ale.encoded, ale.err = json.Marshal(ale) - } -} - -func (ale *accessLogEntry) Length() int { - ale.ensureEncoded() - return len(ale.encoded) -} - -func (ale *accessLogEntry) Encode() ([]byte, error) { - ale.ensureEncoded() - return ale.encoded, ale.err -} - -func (s *Server) withAccessLog(next http.Handler) http.Handler { - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - started := time.Now() - - next.ServeHTTP(w, r) - - entry := &accessLogEntry{ - Method: r.Method, - Host: r.Host, - Path: r.RequestURI, - IP: r.RemoteAddr, - ResponseTime: float64(time.Since(started)) / float64(time.Second), - } - - // We will use the client's IP address as the key. This will cause - // all the access log entries of the same IP address to end up - // on the same partition. - s.AccessLogProducer.Input() <- &sarama.ProducerMessage{ - Topic: "access_log", - Key: sarama.StringEncoder(r.RemoteAddr), - Value: entry, - } - }) -} - -func newDataCollector(brokerList []string) sarama.SyncProducer { - - // For the data collector, we are looking for strong consistency semantics. - // Because we don't change the flush settings, sarama will try to produce messages - // as fast as possible to keep latency low. - config := sarama.NewConfig() - config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message - config.Producer.Retry.Max = 10 // Retry up to 10 times to produce the message - tlsConfig := createTlsConfiguration() - if tlsConfig != nil { - config.Net.TLS.Config = tlsConfig - config.Net.TLS.Enable = true - } - - // On the broker side, you may want to change the following settings to get - // stronger consistency guarantees: - // - For your broker, set `unclean.leader.election.enable` to false - // - For the topic, you could increase `min.insync.replicas`. - - producer, err := sarama.NewSyncProducer(brokerList, config) - if err != nil { - log.Fatalln("Failed to start Sarama producer:", err) - } - - return producer -} - -func newAccessLogProducer(brokerList []string) sarama.AsyncProducer { - - // For the access log, we are looking for AP semantics, with high throughput. - // By creating batches of compressed messages, we reduce network I/O at a cost of more latency.
- config := sarama.NewConfig() - tlsConfig := createTlsConfiguration() - if tlsConfig != nil { - config.Net.TLS.Enable = true - config.Net.TLS.Config = tlsConfig - } - config.Producer.RequiredAcks = sarama.WaitForLocal // Only wait for the leader to ack - config.Producer.Compression = sarama.CompressionSnappy // Compress messages - config.Producer.Flush.Frequency = 500 * time.Millisecond // Flush batches every 500ms - - producer, err := sarama.NewAsyncProducer(brokerList, config) - if err != nil { - log.Fatalln("Failed to start Sarama producer:", err) - } - - // We will just log to STDOUT if we're not able to produce messages. - // Note: messages will only be returned here after all retry attempts are exhausted. - go func() { - for err := range producer.Errors() { - log.Println("Failed to write access log entry:", err) - } - }() - - return producer -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/http_server_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/http_server_test.go deleted file mode 100644 index 7b2451e28..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/examples/http_server/http_server_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package main - -import ( - "io" - "net/http" - "net/http/httptest" - "testing" - - "github.com/Shopify/sarama" - "github.com/Shopify/sarama/mocks" -) - -// In normal operation, we expect one access log entry, -// and one data collector entry. Let's assume both will succeed. -// We should return a HTTP 200 status. -func TestCollectSuccessfully(t *testing.T) { - dataCollectorMock := mocks.NewSyncProducer(t, nil) - dataCollectorMock.ExpectSendMessageAndSucceed() - - accessLogProducerMock := mocks.NewAsyncProducer(t, nil) - accessLogProducerMock.ExpectInputAndSucceed() - - // Now, use dependency injection to use the mocks. - s := &Server{ - DataCollector: dataCollectorMock, - AccessLogProducer: accessLogProducerMock, - } - - // The Server's Close call is important; it will call Close on - // the two mock producers, which will then validate whether all - // expectations are resolved. - defer safeClose(t, s) - - req, err := http.NewRequest("GET", "http://example.com/?data", nil) - if err != nil { - t.Fatal(err) - } - res := httptest.NewRecorder() - s.Handler().ServeHTTP(res, req) - - if res.Code != 200 { - t.Errorf("Expected HTTP status 200, found %d", res.Code) - } - - if string(res.Body.Bytes()) != "Your data is stored with unique identifier important/0/1" { - t.Error("Unexpected response body", res.Body) - } -} - -// Now, let's see if we handle the case of not being able to produce -// to the data collector properly. In this case we should return a 500 status. -func TestCollectionFailure(t *testing.T) { - dataCollectorMock := mocks.NewSyncProducer(t, nil) - dataCollectorMock.ExpectSendMessageAndFail(sarama.ErrRequestTimedOut) - - accessLogProducerMock := mocks.NewAsyncProducer(t, nil) - accessLogProducerMock.ExpectInputAndSucceed() - - s := &Server{ - DataCollector: dataCollectorMock, - AccessLogProducer: accessLogProducerMock, - } - defer safeClose(t, s) - - req, err := http.NewRequest("GET", "http://example.com/?data", nil) - if err != nil { - t.Fatal(err) - } - res := httptest.NewRecorder() - s.Handler().ServeHTTP(res, req) - - if res.Code != 500 { - t.Errorf("Expected HTTP status 500, found %d", res.Code) - } -} - -// We don't expect any data collector calls because the path is wrong, -// so we are not setting any expectations on the dataCollectorMock. 
It -// will still generate an access log entry though. -func TestWrongPath(t *testing.T) { - dataCollectorMock := mocks.NewSyncProducer(t, nil) - - accessLogProducerMock := mocks.NewAsyncProducer(t, nil) - accessLogProducerMock.ExpectInputAndSucceed() - - s := &Server{ - DataCollector: dataCollectorMock, - AccessLogProducer: accessLogProducerMock, - } - defer safeClose(t, s) - - req, err := http.NewRequest("GET", "http://example.com/wrong?data", nil) - if err != nil { - t.Fatal(err) - } - res := httptest.NewRecorder() - - s.Handler().ServeHTTP(res, req) - - if res.Code != 404 { - t.Errorf("Expected HTTP status 404, found %d", res.Code) - } -} - -func safeClose(t *testing.T, o io.Closer) { - if err := o.Close(); err != nil { - t.Error(err) - } -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_request.go deleted file mode 100644 index 3c00fad65..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_request.go +++ /dev/null @@ -1,123 +0,0 @@ -package sarama - -type fetchRequestBlock struct { - fetchOffset int64 - maxBytes int32 -} - -func (f *fetchRequestBlock) encode(pe packetEncoder) error { - pe.putInt64(f.fetchOffset) - pe.putInt32(f.maxBytes) - return nil -} - -func (f *fetchRequestBlock) decode(pd packetDecoder) (err error) { - if f.fetchOffset, err = pd.getInt64(); err != nil { - return err - } - if f.maxBytes, err = pd.getInt32(); err != nil { - return err - } - return nil -} - -type FetchRequest struct { - MaxWaitTime int32 - MinBytes int32 - blocks map[string]map[int32]*fetchRequestBlock -} - -func (f *FetchRequest) encode(pe packetEncoder) (err error) { - pe.putInt32(-1) // replica ID is always -1 for clients - pe.putInt32(f.MaxWaitTime) - pe.putInt32(f.MinBytes) - err = pe.putArrayLength(len(f.blocks)) - if err != nil { - return err - } - for topic, blocks := range f.blocks { - err = pe.putString(topic) - if err != nil { - return err - } - err = pe.putArrayLength(len(blocks)) - if err != nil { - return err - } - for partition, block := range blocks { - pe.putInt32(partition) - err = block.encode(pe) - if err != nil { - return err - } - } - } - return nil -} - -func (f *FetchRequest) decode(pd packetDecoder) (err error) { - if _, err = pd.getInt32(); err != nil { - return err - } - if f.MaxWaitTime, err = pd.getInt32(); err != nil { - return err - } - if f.MinBytes, err = pd.getInt32(); err != nil { - return err - } - topicCount, err := pd.getArrayLength() - if err != nil { - return err - } - if topicCount == 0 { - return nil - } - f.blocks = make(map[string]map[int32]*fetchRequestBlock) - for i := 0; i < topicCount; i++ { - topic, err := pd.getString() - if err != nil { - return err - } - partitionCount, err := pd.getArrayLength() - if err != nil { - return err - } - f.blocks[topic] = make(map[int32]*fetchRequestBlock) - for j := 0; j < partitionCount; j++ { - partition, err := pd.getInt32() - if err != nil { - return err - } - fetchBlock := &fetchRequestBlock{} - if err = fetchBlock.decode(pd); err != nil { - return err // propagate the decode error instead of swallowing it - } - f.blocks[topic][partition] = fetchBlock - } - } - return nil -} - -func (f *FetchRequest) key() int16 { - return 1 -} - -func (f *FetchRequest) version() int16 { - return 0 -} - -func (f *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) { - if f.blocks == nil { - f.blocks = make(map[string]map[int32]*fetchRequestBlock) - } - - if f.blocks[topic] == nil { - f.blocks[topic] = make(map[int32]*fetchRequestBlock) - } - -
tmp := new(fetchRequestBlock) - tmp.maxBytes = maxBytes - tmp.fetchOffset = fetchOffset - - f.blocks[topic][partitionID] = tmp -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_request_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_request_test.go deleted file mode 100644 index 32c083c7d..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_request_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package sarama - -import "testing" - -var ( - fetchRequestNoBlocks = []byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00} - - fetchRequestWithProperties = []byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0xEF, - 0x00, 0x00, 0x00, 0x00} - - fetchRequestOneBlock = []byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x05, 't', 'o', 'p', 'i', 'c', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x56} -) - -func TestFetchRequest(t *testing.T) { - request := new(FetchRequest) - testRequest(t, "no blocks", request, fetchRequestNoBlocks) - - request.MaxWaitTime = 0x20 - request.MinBytes = 0xEF - testRequest(t, "with properties", request, fetchRequestWithProperties) - - request.MaxWaitTime = 0 - request.MinBytes = 0 - request.AddBlock("topic", 0x12, 0x34, 0x56) - testRequest(t, "one block", request, fetchRequestOneBlock) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_response.go deleted file mode 100644 index 1ac543921..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_response.go +++ /dev/null @@ -1,173 +0,0 @@ -package sarama - -type FetchResponseBlock struct { - Err KError - HighWaterMarkOffset int64 - MsgSet MessageSet -} - -func (pr *FetchResponseBlock) decode(pd packetDecoder) (err error) { - tmp, err := pd.getInt16() - if err != nil { - return err - } - pr.Err = KError(tmp) - - pr.HighWaterMarkOffset, err = pd.getInt64() - if err != nil { - return err - } - - msgSetSize, err := pd.getInt32() - if err != nil { - return err - } - - msgSetDecoder, err := pd.getSubset(int(msgSetSize)) - if err != nil { - return err - } - err = (&pr.MsgSet).decode(msgSetDecoder) - - return err -} - -type FetchResponse struct { - Blocks map[string]map[int32]*FetchResponseBlock -} - -func (pr *FetchResponseBlock) encode(pe packetEncoder) (err error) { - pe.putInt16(int16(pr.Err)) - - pe.putInt64(pr.HighWaterMarkOffset) - - pe.push(&lengthField{}) - err = pr.MsgSet.encode(pe) - if err != nil { - return err - } - return pe.pop() -} - -func (fr *FetchResponse) decode(pd packetDecoder) (err error) { - numTopics, err := pd.getArrayLength() - if err != nil { - return err - } - - fr.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics) - for i := 0; i < numTopics; i++ { - name, err := pd.getString() - if err != nil { - return err - } - - numBlocks, err := pd.getArrayLength() - if err != nil { - return err - } - - fr.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks) - - for j := 0; j < numBlocks; j++ { - id, err := pd.getInt32() - if err != nil { - return err - } - - block := new(FetchResponseBlock) - err = block.decode(pd) - if err != nil { - return err - } - fr.Blocks[name][id] = block - } - } - - return nil -} - -func (fr *FetchResponse) encode(pe packetEncoder) (err error) { - err = pe.putArrayLength(len(fr.Blocks)) - if err != nil { - 
return err - } - - for topic, partitions := range fr.Blocks { - err = pe.putString(topic) - if err != nil { - return err - } - - err = pe.putArrayLength(len(partitions)) - if err != nil { - return err - } - - for id, block := range partitions { - pe.putInt32(id) - err = block.encode(pe) - if err != nil { - return err - } - } - - } - return nil -} - -func (fr *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock { - if fr.Blocks == nil { - return nil - } - - if fr.Blocks[topic] == nil { - return nil - } - - return fr.Blocks[topic][partition] -} - -func (fr *FetchResponse) AddError(topic string, partition int32, err KError) { - if fr.Blocks == nil { - fr.Blocks = make(map[string]map[int32]*FetchResponseBlock) - } - partitions, ok := fr.Blocks[topic] - if !ok { - partitions = make(map[int32]*FetchResponseBlock) - fr.Blocks[topic] = partitions - } - frb, ok := partitions[partition] - if !ok { - frb = new(FetchResponseBlock) - partitions[partition] = frb - } - frb.Err = err -} - -func (fr *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) { - if fr.Blocks == nil { - fr.Blocks = make(map[string]map[int32]*FetchResponseBlock) - } - partitions, ok := fr.Blocks[topic] - if !ok { - partitions = make(map[int32]*FetchResponseBlock) - fr.Blocks[topic] = partitions - } - frb, ok := partitions[partition] - if !ok { - frb = new(FetchResponseBlock) - partitions[partition] = frb - } - var kb []byte - var vb []byte - if key != nil { - kb, _ = key.Encode() - } - if value != nil { - vb, _ = value.Encode() - } - msg := &Message{Key: kb, Value: vb} - msgBlock := &MessageBlock{Msg: msg, Offset: offset} - frb.MsgSet.Messages = append(frb.MsgSet.Messages, msgBlock) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_response_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_response_test.go deleted file mode 100644 index a23a05340..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/fetch_response_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package sarama - -import ( - "bytes" - "testing" -) - -var ( - emptyFetchResponse = []byte{ - 0x00, 0x00, 0x00, 0x00} - - oneMessageFetchResponse = []byte{ - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x05, 't', 'o', 'p', 'i', 'c', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x05, - 0x00, 0x01, - 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, - 0x00, 0x00, 0x00, 0x1C, - // messageSet - 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x10, - // message - 0x23, 0x96, 0x4a, 0xf7, // CRC - 0x00, - 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0x02, 0x00, 0xEE} -) - -func TestEmptyFetchResponse(t *testing.T) { - response := FetchResponse{} - testDecodable(t, "empty", &response, emptyFetchResponse) - - if len(response.Blocks) != 0 { - t.Error("Decoding produced topic blocks where there were none.") - } - -} - -func TestOneMessageFetchResponse(t *testing.T) { - response := FetchResponse{} - testDecodable(t, "one message", &response, oneMessageFetchResponse) - - if len(response.Blocks) != 1 { - t.Fatal("Decoding produced incorrect number of topic blocks.") - } - - if len(response.Blocks["topic"]) != 1 { - t.Fatal("Decoding produced incorrect number of partition blocks for topic.") - } - - block := response.GetBlock("topic", 5) - if block == nil { - t.Fatal("GetBlock didn't return block.") - } - if block.Err != ErrOffsetOutOfRange { - t.Error("Decoding didn't produce correct error code.") - } - if block.HighWaterMarkOffset != 0x10101010 { - t.Error("Decoding didn't 
produce correct high water mark offset.") - } - if block.MsgSet.PartialTrailingMessage { - t.Error("Decoding detected a partial trailing message where there wasn't one.") - } - - if len(block.MsgSet.Messages) != 1 { - t.Fatal("Decoding produced incorrect number of messages.") - } - msgBlock := block.MsgSet.Messages[0] - if msgBlock.Offset != 0x550000 { - t.Error("Decoding produced incorrect message offset.") - } - msg := msgBlock.Msg - if msg.Codec != CompressionNone { - t.Error("Decoding produced incorrect message compression.") - } - if msg.Key != nil { - t.Error("Decoding produced message key where there was none.") - } - if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) { - t.Error("Decoding produced incorrect message value.") - } -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/functional_client_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/functional_client_test.go deleted file mode 100644 index 9e8e32968..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/functional_client_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package sarama - -import ( - "fmt" - "testing" - "time" -) - -func TestFuncConnectionFailure(t *testing.T) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - Proxies["kafka1"].Enabled = false - SaveProxy(t, "kafka1") - - config := NewConfig() - config.Metadata.Retry.Max = 1 - - _, err := NewClient([]string{kafkaBrokers[0]}, config) - if err != ErrOutOfBrokers { - t.Fatal("Expected returned error to be ErrOutOfBrokers, but was: ", err) - } -} - -func TestFuncClientMetadata(t *testing.T) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - config := NewConfig() - config.Metadata.Retry.Max = 1 - config.Metadata.Retry.Backoff = 10 * time.Millisecond - client, err := NewClient(kafkaBrokers, config) - if err != nil { - t.Fatal(err) - } - - if err := client.RefreshMetadata("unknown_topic"); err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, got", err) - } - - if _, err := client.Leader("unknown_topic", 0); err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, got", err) - } - - if _, err := client.Replicas("invalid/topic", 0); err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, got", err) - } - - partitions, err := client.Partitions("test.4") - if err != nil { - t.Error(err) - } - if len(partitions) != 4 { - t.Errorf("Expected test.4 topic to have 4 partitions, found %v", partitions) - } - - partitions, err = client.Partitions("test.1") - if err != nil { - t.Error(err) - } - if len(partitions) != 1 { - t.Errorf("Expected test.1 topic to have 1 partition, found %v", partitions) - } - - safeClose(t, client) -} - -func TestFuncClientCoordinator(t *testing.T) { - checkKafkaVersion(t, "0.8.2") - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - client, err := NewClient(kafkaBrokers, nil) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - broker, err := client.Coordinator(fmt.Sprintf("another_new_consumer_group_%d", i)) - if err != nil { - t.Error(err) - } - - if connected, err := broker.Connected(); !connected || err != nil { - t.Errorf("Expected coordinator broker %s to be properly connected.", broker.Addr()) - } - } - - safeClose(t, client) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/functional_consumer_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/functional_consumer_test.go deleted file mode 100644 index ab8433109..000000000 ---
a/Godeps/_workspace/src/github.com/Shopify/sarama/functional_consumer_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package sarama - -import ( - "math" - "testing" -) - -func TestFuncConsumerOffsetOutOfRange(t *testing.T) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - consumer, err := NewConsumer(kafkaBrokers, nil) - if err != nil { - t.Fatal(err) - } - - if _, err := consumer.ConsumePartition("test.1", 0, -10); err != ErrOffsetOutOfRange { - t.Error("Expected ErrOffsetOutOfRange, got:", err) - } - - if _, err := consumer.ConsumePartition("test.1", 0, math.MaxInt64); err != ErrOffsetOutOfRange { - t.Error("Expected ErrOffsetOutOfRange, got:", err) - } - - safeClose(t, consumer) -} - -func TestConsumerHighWaterMarkOffset(t *testing.T) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - p, err := NewSyncProducer(kafkaBrokers, nil) - if err != nil { - t.Fatal(err) - } - defer safeClose(t, p) - - _, offset, err := p.SendMessage(&ProducerMessage{Topic: "test.1", Value: StringEncoder("Test")}) - if err != nil { - t.Fatal(err) - } - - c, err := NewConsumer(kafkaBrokers, nil) - if err != nil { - t.Fatal(err) - } - defer safeClose(t, c) - - pc, err := c.ConsumePartition("test.1", 0, OffsetOldest) - if err != nil { - t.Fatal(err) - } - - <-pc.Messages() - - if hwmo := pc.HighWaterMarkOffset(); hwmo != offset+1 { - t.Logf("Last produced offset %d; high water mark should be one higher but found %d.", offset, hwmo) - } - - safeClose(t, pc) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/functional_producer_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/functional_producer_test.go deleted file mode 100644 index 1504e7600..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/functional_producer_test.go +++ /dev/null @@ -1,203 +0,0 @@ -package sarama - -import ( - "fmt" - "sync" - "testing" - "time" -) - -const TestBatchSize = 1000 - -func TestFuncProducing(t *testing.T) { - config := NewConfig() - testProducingMessages(t, config) -} - -func TestFuncProducingGzip(t *testing.T) { - config := NewConfig() - config.Producer.Compression = CompressionGZIP - testProducingMessages(t, config) -} - -func TestFuncProducingSnappy(t *testing.T) { - config := NewConfig() - config.Producer.Compression = CompressionSnappy - testProducingMessages(t, config) -} - -func TestFuncProducingNoResponse(t *testing.T) { - config := NewConfig() - config.Producer.RequiredAcks = NoResponse - testProducingMessages(t, config) -} - -func TestFuncProducingFlushing(t *testing.T) { - config := NewConfig() - config.Producer.Flush.Messages = TestBatchSize / 8 - config.Producer.Flush.Frequency = 250 * time.Millisecond - testProducingMessages(t, config) -} - -func TestFuncMultiPartitionProduce(t *testing.T) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - config := NewConfig() - config.ChannelBufferSize = 20 - config.Producer.Flush.Frequency = 50 * time.Millisecond - config.Producer.Flush.Messages = 200 - config.Producer.Return.Successes = true - producer, err := NewSyncProducer(kafkaBrokers, config) - if err != nil { - t.Fatal(err) - } - - var wg sync.WaitGroup - wg.Add(TestBatchSize) - - for i := 1; i <= TestBatchSize; i++ { - go func(i int) { - defer wg.Done() - msg := &ProducerMessage{Topic: "test.64", Key: nil, Value: StringEncoder(fmt.Sprintf("hur %d", i))} - if _, _, err := producer.SendMessage(msg); err != nil { - t.Error(i, err) - } - }(i) - } - - wg.Wait() - if err := producer.Close(); err != nil { - t.Error(err) - } -} - -func 
TestFuncProducingToInvalidTopic(t *testing.T) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - producer, err := NewSyncProducer(kafkaBrokers, nil) - if err != nil { - t.Fatal(err) - } - - if _, _, err := producer.SendMessage(&ProducerMessage{Topic: "in/valid"}); err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, found", err) - } - - if _, _, err := producer.SendMessage(&ProducerMessage{Topic: "in/valid"}); err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, found", err) - } - - safeClose(t, producer) -} - -func testProducingMessages(t *testing.T, config *Config) { - setupFunctionalTest(t) - defer teardownFunctionalTest(t) - - config.Producer.Return.Successes = true - config.Consumer.Return.Errors = true - - client, err := NewClient(kafkaBrokers, config) - if err != nil { - t.Fatal(err) - } - - master, err := NewConsumerFromClient(client) - if err != nil { - t.Fatal(err) - } - consumer, err := master.ConsumePartition("test.1", 0, OffsetNewest) - if err != nil { - t.Fatal(err) - } - - producer, err := NewAsyncProducerFromClient(client) - if err != nil { - t.Fatal(err) - } - - expectedResponses := TestBatchSize - for i := 1; i <= TestBatchSize; { - msg := &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder(fmt.Sprintf("testing %d", i))} - select { - case producer.Input() <- msg: - i++ - case ret := <-producer.Errors(): - t.Fatal(ret.Err) - case <-producer.Successes(): - expectedResponses-- - } - } - for expectedResponses > 0 { - select { - case ret := <-producer.Errors(): - t.Fatal(ret.Err) - case <-producer.Successes(): - expectedResponses-- - } - } - safeClose(t, producer) - - for i := 1; i <= TestBatchSize; i++ { - select { - case <-time.After(10 * time.Second): - t.Fatal("Not received any more events in the last 10 seconds.") - - case err := <-consumer.Errors(): - t.Error(err) - - case message := <-consumer.Messages(): - if string(message.Value) != fmt.Sprintf("testing %d", i) { - t.Fatalf("Unexpected message with index %d: %s", i, message.Value) - } - } - - } - safeClose(t, consumer) - safeClose(t, client) -} - -// Benchmarks - -func BenchmarkProducerSmall(b *testing.B) { - benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 128))) -} -func BenchmarkProducerMedium(b *testing.B) { - benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 1024))) -} -func BenchmarkProducerLarge(b *testing.B) { - benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 8192))) -} -func BenchmarkProducerSmallSinglePartition(b *testing.B) { - benchmarkProducer(b, nil, "test.1", ByteEncoder(make([]byte, 128))) -} -func BenchmarkProducerMediumSnappy(b *testing.B) { - conf := NewConfig() - conf.Producer.Compression = CompressionSnappy - benchmarkProducer(b, conf, "test.1", ByteEncoder(make([]byte, 1024))) -} - -func benchmarkProducer(b *testing.B, conf *Config, topic string, value Encoder) { - setupFunctionalTest(b) - defer teardownFunctionalTest(b) - - producer, err := NewAsyncProducer(kafkaBrokers, conf) - if err != nil { - b.Fatal(err) - } - - b.ResetTimer() - - for i := 1; i <= b.N; { - msg := &ProducerMessage{Topic: topic, Key: StringEncoder(fmt.Sprintf("%d", i)), Value: value} - select { - case producer.Input() <- msg: - i++ - case ret := <-producer.Errors(): - b.Fatal(ret.Err) - } - } - safeClose(b, producer) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/functional_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/functional_test.go deleted file mode 100644 index 
171002ee9..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/functional_test.go +++ /dev/null @@ -1,146 +0,0 @@ -package sarama - -import ( - "log" - "math/rand" - "net" - "os" - "strconv" - "strings" - "testing" - "time" - - toxiproxy "github.com/Shopify/toxiproxy/client" -) - -const ( - VagrantToxiproxy = "http://192.168.100.67:8474" - VagrantKafkaPeers = "192.168.100.67:9091,192.168.100.67:9092,192.168.100.67:9093,192.168.100.67:9094,192.168.100.67:9095" - VagrantZookeeperPeers = "192.168.100.67:2181,192.168.100.67:2182,192.168.100.67:2183,192.168.100.67:2184,192.168.100.67:2185" -) - -var ( - kafkaAvailable, kafkaRequired bool - kafkaBrokers []string - - proxyClient *toxiproxy.Client - Proxies map[string]*toxiproxy.Proxy - ZKProxies = []string{"zk1", "zk2", "zk3", "zk4", "zk5"} - KafkaProxies = []string{"kafka1", "kafka2", "kafka3", "kafka4", "kafka5"} -) - -func init() { - if os.Getenv("DEBUG") == "true" { - Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags) - } - - seed := time.Now().UTC().UnixNano() - if tmp := os.Getenv("TEST_SEED"); tmp != "" { - seed, _ = strconv.ParseInt(tmp, 0, 64) - } - Logger.Println("Using random seed:", seed) - rand.Seed(seed) - - proxyAddr := os.Getenv("TOXIPROXY_ADDR") - if proxyAddr == "" { - proxyAddr = VagrantToxiproxy - } - proxyClient = toxiproxy.NewClient(proxyAddr) - - kafkaPeers := os.Getenv("KAFKA_PEERS") - if kafkaPeers == "" { - kafkaPeers = VagrantKafkaPeers - } - kafkaBrokers = strings.Split(kafkaPeers, ",") - - if c, err := net.DialTimeout("tcp", kafkaBrokers[0], 5*time.Second); err == nil { - if err = c.Close(); err == nil { - kafkaAvailable = true - } - } - - kafkaRequired = os.Getenv("CI") != "" -} - -func checkKafkaAvailability(t testing.TB) { - if !kafkaAvailable { - if kafkaRequired { - t.Fatalf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka at a different location.", kafkaBrokers[0]) - } else { - t.Skipf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka at a different location.", kafkaBrokers[0]) - } - } -} - -func checkKafkaVersion(t testing.TB, requiredVersion string) { - kafkaVersion := os.Getenv("KAFKA_VERSION") - if kafkaVersion == "" { - t.Logf("No KAFKA_VERSION set. This test requires Kafka version %s or higher. Continuing...", requiredVersion) - } else { - available := parseKafkaVersion(kafkaVersion) - required := parseKafkaVersion(requiredVersion) - if !available.satisfies(required) { - t.Skipf("Kafka version %s is required for this test; you have %s. 
Skipping...", requiredVersion, kafkaVersion) - } - } -} - -func resetProxies(t testing.TB) { - if err := proxyClient.ResetState(); err != nil { - t.Error(err) - } - Proxies = nil -} - -func fetchProxies(t testing.TB) { - var err error - Proxies, err = proxyClient.Proxies() - if err != nil { - t.Fatal(err) - } -} - -func SaveProxy(t *testing.T, px string) { - if err := Proxies[px].Save(); err != nil { - t.Fatal(err) - } -} - -func setupFunctionalTest(t testing.TB) { - checkKafkaAvailability(t) - resetProxies(t) - fetchProxies(t) -} - -func teardownFunctionalTest(t testing.TB) { - resetProxies(t) -} - -type kafkaVersion []int - -func (kv kafkaVersion) satisfies(other kafkaVersion) bool { - var ov int - for index, v := range kv { - if len(other) <= index { - ov = 0 - } else { - ov = other[index] - } - - if v < ov { - return false - } - } - return true -} - -func parseKafkaVersion(version string) kafkaVersion { - numbers := strings.Split(version, ".") - result := make(kafkaVersion, 0, len(numbers)) - for _, number := range numbers { - nr, _ := strconv.Atoi(number) - result = append(result, nr) - } - - return result -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/length_field.go b/Godeps/_workspace/src/github.com/Shopify/sarama/length_field.go deleted file mode 100644 index 70078be5d..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/length_field.go +++ /dev/null @@ -1,29 +0,0 @@ -package sarama - -import "encoding/binary" - -// LengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths. -type lengthField struct { - startOffset int -} - -func (l *lengthField) saveOffset(in int) { - l.startOffset = in -} - -func (l *lengthField) reserveLength() int { - return 4 -} - -func (l *lengthField) run(curOffset int, buf []byte) error { - binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4)) - return nil -} - -func (l *lengthField) check(curOffset int, buf []byte) error { - if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) { - return PacketDecodingError{"length field invalid"} - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/message.go b/Godeps/_workspace/src/github.com/Shopify/sarama/message.go deleted file mode 100644 index 49b19c5a6..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/message.go +++ /dev/null @@ -1,154 +0,0 @@ -package sarama - -import ( - "bytes" - "compress/gzip" - "fmt" - "io/ioutil" -) - -// CompressionCodec represents the various compression codecs recognized by Kafka in messages. -type CompressionCodec int8 - -// only the last two bits are really used -const compressionCodecMask int8 = 0x03 - -const ( - CompressionNone CompressionCodec = 0 - CompressionGZIP CompressionCodec = 1 - CompressionSnappy CompressionCodec = 2 -) - -// The spec just says: "This is a version id used to allow backwards compatible evolution of the message -// binary format." but it doesn't say what the current value is, so presumably 0... 
-const messageFormat int8 = 0 - -type Message struct { - Codec CompressionCodec // codec used to compress the message contents - Key []byte // the message key, may be nil - Value []byte // the message contents - Set *MessageSet // the message set a message might wrap - - compressedCache []byte -} - -func (m *Message) encode(pe packetEncoder) error { - pe.push(&crc32Field{}) - - pe.putInt8(messageFormat) - - attributes := int8(m.Codec) & compressionCodecMask - pe.putInt8(attributes) - - err := pe.putBytes(m.Key) - if err != nil { - return err - } - - var payload []byte - - if m.compressedCache != nil { - payload = m.compressedCache - m.compressedCache = nil - } else { - switch m.Codec { - case CompressionNone: - payload = m.Value - case CompressionGZIP: - var buf bytes.Buffer - writer := gzip.NewWriter(&buf) - if _, err = writer.Write(m.Value); err != nil { - return err - } - if err = writer.Close(); err != nil { - return err - } - m.compressedCache = buf.Bytes() - payload = m.compressedCache - case CompressionSnappy: - tmp := snappyEncode(m.Value) - m.compressedCache = tmp - payload = m.compressedCache - default: - return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)} - } - } - - if err = pe.putBytes(payload); err != nil { - return err - } - - return pe.pop() -} - -func (m *Message) decode(pd packetDecoder) (err error) { - err = pd.push(&crc32Field{}) - if err != nil { - return err - } - - format, err := pd.getInt8() - if err != nil { - return err - } - if format != messageFormat { - return PacketDecodingError{"unexpected messageFormat"} - } - - attribute, err := pd.getInt8() - if err != nil { - return err - } - m.Codec = CompressionCodec(attribute & compressionCodecMask) - - m.Key, err = pd.getBytes() - if err != nil { - return err - } - - m.Value, err = pd.getBytes() - if err != nil { - return err - } - - switch m.Codec { - case CompressionNone: - // nothing to do - case CompressionGZIP: - if m.Value == nil { - return PacketDecodingError{"GZIP compression specified, but no data to uncompress"} - } - reader, err := gzip.NewReader(bytes.NewReader(m.Value)) - if err != nil { - return err - } - if m.Value, err = ioutil.ReadAll(reader); err != nil { - return err - } - return m.decodeSet() - case CompressionSnappy: - if m.Value == nil { - return PacketDecodingError{"Snappy compression specified, but no data to uncompress"} - } - if m.Value, err = snappyDecode(m.Value); err != nil { - return err - } - return m.decodeSet() - default: - return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)} - } - - err = pd.pop() - if err != nil { - return err - } - - return nil -} - -// decodeSet decodes a message set from a previously encoded bulk message -func (m *Message) decodeSet() (err error) { - pd := realDecoder{raw: m.Value} - m.Set = &MessageSet{} - return m.Set.decode(&pd) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/message_set.go b/Godeps/_workspace/src/github.com/Shopify/sarama/message_set.go deleted file mode 100644 index f028784e5..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/message_set.go +++ /dev/null @@ -1,89 +0,0 @@ -package sarama - -type MessageBlock struct { - Offset int64 - Msg *Message -} - -// Messages is a convenience helper which returns all the -// messages that are wrapped in this block -func (msb *MessageBlock) Messages() []*MessageBlock { - if msb.Msg.Set != nil { - return msb.Msg.Set.Messages - } - return []*MessageBlock{msb} -} - -func (msb *MessageBlock) encode(pe packetEncoder) error
{ - pe.putInt64(msb.Offset) - pe.push(&lengthField{}) - err := msb.Msg.encode(pe) - if err != nil { - return err - } - return pe.pop() -} - -func (msb *MessageBlock) decode(pd packetDecoder) (err error) { - if msb.Offset, err = pd.getInt64(); err != nil { - return err - } - - if err = pd.push(&lengthField{}); err != nil { - return err - } - - msb.Msg = new(Message) - if err = msb.Msg.decode(pd); err != nil { - return err - } - - if err = pd.pop(); err != nil { - return err - } - - return nil -} - -type MessageSet struct { - PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock - Messages []*MessageBlock -} - -func (ms *MessageSet) encode(pe packetEncoder) error { - for i := range ms.Messages { - err := ms.Messages[i].encode(pe) - if err != nil { - return err - } - } - return nil -} - -func (ms *MessageSet) decode(pd packetDecoder) (err error) { - ms.Messages = nil - - for pd.remaining() > 0 { - msb := new(MessageBlock) - err = msb.decode(pd) - switch err { - case nil: - ms.Messages = append(ms.Messages, msb) - case ErrInsufficientData: - // As an optimization the server is allowed to return a partial message at the - // end of the message set. Clients should handle this case. So we just ignore such things. - ms.PartialTrailingMessage = true - return nil - default: - return err - } - } - - return nil -} - -func (ms *MessageSet) addMessage(msg *Message) { - block := new(MessageBlock) - block.Msg = msg - ms.Messages = append(ms.Messages, block) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/message_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/message_test.go deleted file mode 100644 index 1dae896fe..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/message_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package sarama - -import "testing" - -var ( - emptyMessage = []byte{ - 167, 236, 104, 3, // CRC - 0x00, // magic version byte - 0x00, // attribute flags - 0xFF, 0xFF, 0xFF, 0xFF, // key - 0xFF, 0xFF, 0xFF, 0xFF} // value - - emptyGzipMessage = []byte{ - 97, 79, 149, 90, //CRC - 0x00, // magic version byte - 0x01, // attribute flags - 0xFF, 0xFF, 0xFF, 0xFF, // key - // value - 0x00, 0x00, 0x00, 0x17, - 0x1f, 0x8b, - 0x08, - 0, 0, 9, 110, 136, 0, 255, 1, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0} - - emptyBulkSnappyMessage = []byte{ - 180, 47, 53, 209, //CRC - 0x00, // magic version byte - 0x02, // attribute flags - 0xFF, 0xFF, 0xFF, 0xFF, // key - 0, 0, 0, 42, - 130, 83, 78, 65, 80, 80, 89, 0, // SNAPPY magic - 0, 0, 0, 1, // min version - 0, 0, 0, 1, // default version - 0, 0, 0, 22, 52, 0, 0, 25, 1, 16, 14, 227, 138, 104, 118, 25, 15, 13, 1, 8, 1, 0, 0, 62, 26, 0} - - emptyBulkGzipMessage = []byte{ - 139, 160, 63, 141, //CRC - 0x00, // magic version byte - 0x01, // attribute flags - 0xFF, 0xFF, 0xFF, 0xFF, // key - 0x00, 0x00, 0x00, 0x27, // len - 0x1f, 0x8b, // Gzip Magic - 0x08, // deflate compressed - 0, 0, 0, 0, 0, 0, 0, 99, 96, 128, 3, 190, 202, 112, 143, 7, 12, 12, 255, 129, 0, 33, 200, 192, 136, 41, 3, 0, 199, 226, 155, 70, 52, 0, 0, 0} -) - -func TestMessageEncoding(t *testing.T) { - message := Message{} - testEncodable(t, "empty", &message, emptyMessage) - - message.Value = []byte{} - message.Codec = CompressionGZIP - testEncodable(t, "empty gzip", &message, emptyGzipMessage) -} - -func TestMessageDecoding(t *testing.T) { - message := Message{} - testDecodable(t, "empty", &message, emptyMessage) - if message.Codec != CompressionNone { - t.Error("Decoding produced compression codec where there was 
none.") - } - if message.Key != nil { - t.Error("Decoding produced key where there was none.") - } - if message.Value != nil { - t.Error("Decoding produced value where there was none.") - } - if message.Set != nil { - t.Error("Decoding produced set where there was none.") - } - - testDecodable(t, "empty gzip", &message, emptyGzipMessage) - if message.Codec != CompressionGZIP { - t.Error("Decoding produced incorrect compression codec (was gzip).") - } - if message.Key != nil { - t.Error("Decoding produced key where there was none.") - } - if message.Value == nil || len(message.Value) != 0 { - t.Error("Decoding produced nil or content-ful value where there was an empty array.") - } -} - -func TestMessageDecodingBulkSnappy(t *testing.T) { - message := Message{} - testDecodable(t, "bulk snappy", &message, emptyBulkSnappyMessage) - if message.Codec != CompressionSnappy { - t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionSnappy) - } - if message.Key != nil { - t.Errorf("Decoding produced key %+v, but none was expected.", message.Key) - } - if message.Set == nil { - t.Error("Decoding produced no set, but one was expected.") - } else if len(message.Set.Messages) != 2 { - t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages)) - } -} - -func TestMessageDecodingBulkGzip(t *testing.T) { - message := Message{} - testDecodable(t, "bulk gzip", &message, emptyBulkGzipMessage) - if message.Codec != CompressionGZIP { - t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionGZIP) - } - if message.Key != nil { - t.Errorf("Decoding produced key %+v, but none was expected.", message.Key) - } - if message.Set == nil { - t.Error("Decoding produced no set, but one was expected.") - } else if len(message.Set.Messages) != 2 { - t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages)) - } -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_request.go deleted file mode 100644 index 130cfd4f1..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_request.go +++ /dev/null @@ -1,48 +0,0 @@ -package sarama - -type MetadataRequest struct { - Topics []string -} - -func (mr *MetadataRequest) encode(pe packetEncoder) error { - err := pe.putArrayLength(len(mr.Topics)) - if err != nil { - return err - } - - for i := range mr.Topics { - err = pe.putString(mr.Topics[i]) - if err != nil { - return err - } - } - return nil -} - -func (mr *MetadataRequest) decode(pd packetDecoder) error { - topicCount, err := pd.getArrayLength() - if err != nil { - return err - } - if topicCount == 0 { - return nil - } - - mr.Topics = make([]string, topicCount) - for i := range mr.Topics { - topic, err := pd.getString() - if err != nil { - return err - } - mr.Topics[i] = topic - } - return nil -} - -func (mr *MetadataRequest) key() int16 { - return 3 -} - -func (mr *MetadataRequest) version() int16 { - return 0 -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_request_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_request_test.go deleted file mode 100644 index 44f3146e4..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_request_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package sarama - -import "testing" - -var ( - metadataRequestNoTopics = []byte{ - 0x00, 0x00, 0x00, 0x00} - - metadataRequestOneTopic = []byte{ - 0x00, 0x00, 
0x00, 0x01, - 0x00, 0x06, 't', 'o', 'p', 'i', 'c', '1'} - - metadataRequestThreeTopics = []byte{ - 0x00, 0x00, 0x00, 0x03, - 0x00, 0x03, 'f', 'o', 'o', - 0x00, 0x03, 'b', 'a', 'r', - 0x00, 0x03, 'b', 'a', 'z'} -) - -func TestMetadataRequest(t *testing.T) { - request := new(MetadataRequest) - testRequest(t, "no topics", request, metadataRequestNoTopics) - - request.Topics = []string{"topic1"} - testRequest(t, "one topic", request, metadataRequestOneTopic) - - request.Topics = []string{"foo", "bar", "baz"} - testRequest(t, "three topics", request, metadataRequestThreeTopics) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_response.go deleted file mode 100644 index b82221f7e..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_response.go +++ /dev/null @@ -1,227 +0,0 @@ -package sarama - -type PartitionMetadata struct { - Err KError - ID int32 - Leader int32 - Replicas []int32 - Isr []int32 -} - -func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) { - tmp, err := pd.getInt16() - if err != nil { - return err - } - pm.Err = KError(tmp) - - pm.ID, err = pd.getInt32() - if err != nil { - return err - } - - pm.Leader, err = pd.getInt32() - if err != nil { - return err - } - - pm.Replicas, err = pd.getInt32Array() - if err != nil { - return err - } - - pm.Isr, err = pd.getInt32Array() - if err != nil { - return err - } - - return nil -} - -func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) { - pe.putInt16(int16(pm.Err)) - pe.putInt32(pm.ID) - pe.putInt32(pm.Leader) - - err = pe.putInt32Array(pm.Replicas) - if err != nil { - return err - } - - err = pe.putInt32Array(pm.Isr) - if err != nil { - return err - } - - return nil -} - -type TopicMetadata struct { - Err KError - Name string - Partitions []*PartitionMetadata -} - -func (tm *TopicMetadata) decode(pd packetDecoder) (err error) { - tmp, err := pd.getInt16() - if err != nil { - return err - } - tm.Err = KError(tmp) - - tm.Name, err = pd.getString() - if err != nil { - return err - } - - n, err := pd.getArrayLength() - if err != nil { - return err - } - tm.Partitions = make([]*PartitionMetadata, n) - for i := 0; i < n; i++ { - tm.Partitions[i] = new(PartitionMetadata) - err = tm.Partitions[i].decode(pd) - if err != nil { - return err - } - } - - return nil -} - -func (tm *TopicMetadata) encode(pe packetEncoder) (err error) { - pe.putInt16(int16(tm.Err)) - - err = pe.putString(tm.Name) - if err != nil { - return err - } - - err = pe.putArrayLength(len(tm.Partitions)) - if err != nil { - return err - } - - for _, pm := range tm.Partitions { - err = pm.encode(pe) - if err != nil { - return err - } - } - - return nil -} - -type MetadataResponse struct { - Brokers []*Broker - Topics []*TopicMetadata -} - -func (m *MetadataResponse) decode(pd packetDecoder) (err error) { - n, err := pd.getArrayLength() - if err != nil { - return err - } - - m.Brokers = make([]*Broker, n) - for i := 0; i < n; i++ { - m.Brokers[i] = new(Broker) - err = m.Brokers[i].decode(pd) - if err != nil { - return err - } - } - - n, err = pd.getArrayLength() - if err != nil { - return err - } - - m.Topics = make([]*TopicMetadata, n) - for i := 0; i < n; i++ { - m.Topics[i] = new(TopicMetadata) - err = m.Topics[i].decode(pd) - if err != nil { - return err - } - } - - return nil -} - -func (m *MetadataResponse) encode(pe packetEncoder) error { - err := pe.putArrayLength(len(m.Brokers)) - if err != nil { - return err - } - for _, broker := 
range m.Brokers { - err = broker.encode(pe) - if err != nil { - return err - } - } - - err = pe.putArrayLength(len(m.Topics)) - if err != nil { - return err - } - for _, tm := range m.Topics { - err = tm.encode(pe) - if err != nil { - return err - } - } - - return nil -} - -// testing API - -func (m *MetadataResponse) AddBroker(addr string, id int32) { - m.Brokers = append(m.Brokers, &Broker{id: id, addr: addr}) -} - -func (m *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata { - var tmatch *TopicMetadata - - for _, tm := range m.Topics { - if tm.Name == topic { - tmatch = tm - goto foundTopic - } - } - - tmatch = new(TopicMetadata) - tmatch.Name = topic - m.Topics = append(m.Topics, tmatch) - -foundTopic: - - tmatch.Err = err - return tmatch -} - -func (m *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) { - tmatch := m.AddTopic(topic, ErrNoError) - var pmatch *PartitionMetadata - - for _, pm := range tmatch.Partitions { - if pm.ID == partition { - pmatch = pm - goto foundPartition - } - } - - pmatch = new(PartitionMetadata) - pmatch.ID = partition - tmatch.Partitions = append(tmatch.Partitions, pmatch) - -foundPartition: - - pmatch.Leader = brokerID - pmatch.Replicas = replicas - pmatch.Isr = isr - pmatch.Err = err - -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_response_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_response_test.go deleted file mode 100644 index 1f1a51549..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/metadata_response_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package sarama - -import "testing" - -var ( - emptyMetadataResponse = []byte{ - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00} - - brokersNoTopicsMetadataResponse = []byte{ - 0x00, 0x00, 0x00, 0x02, - - 0x00, 0x00, 0xab, 0xff, - 0x00, 0x09, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't', - 0x00, 0x00, 0x00, 0x33, - - 0x00, 0x01, 0x02, 0x03, - 0x00, 0x0a, 'g', 'o', 'o', 'g', 'l', 'e', '.', 'c', 'o', 'm', - 0x00, 0x00, 0x01, 0x11, - - 0x00, 0x00, 0x00, 0x00} - - topicsNoBrokersMetadataResponse = []byte{ - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, - - 0x00, 0x00, - 0x00, 0x03, 'f', 'o', 'o', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x04, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x07, - 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, - 0x00, 0x00, 0x00, 0x00, - - 0x00, 0x00, - 0x00, 0x03, 'b', 'a', 'r', - 0x00, 0x00, 0x00, 0x00} -) - -func TestEmptyMetadataResponse(t *testing.T) { - response := MetadataResponse{} - - testDecodable(t, "empty", &response, emptyMetadataResponse) - if len(response.Brokers) != 0 { - t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!") - } - if len(response.Topics) != 0 { - t.Error("Decoding produced", len(response.Topics), "topics where there were none!") - } -} - -func TestMetadataResponseWithBrokers(t *testing.T) { - response := MetadataResponse{} - - testDecodable(t, "brokers, no topics", &response, brokersNoTopicsMetadataResponse) - if len(response.Brokers) != 2 { - t.Fatal("Decoding produced", len(response.Brokers), "brokers where there were two!") - } - - if response.Brokers[0].id != 0xabff { - t.Error("Decoding produced invalid broker 0 id.") - } - if response.Brokers[0].addr != "localhost:51" { - t.Error("Decoding produced invalid broker 0 address.") - } - if response.Brokers[1].id != 0x010203 { - t.Error("Decoding produced invalid broker 1 id.") - } - 
if response.Brokers[1].addr != "google.com:273" { - t.Error("Decoding produced invalid broker 1 address.") - } - - if len(response.Topics) != 0 { - t.Error("Decoding produced", len(response.Topics), "topics where there were none!") - } -} - -func TestMetadataResponseWithTopics(t *testing.T) { - response := MetadataResponse{} - - testDecodable(t, "topics, no brokers", &response, topicsNoBrokersMetadataResponse) - if len(response.Brokers) != 0 { - t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!") - } - - if len(response.Topics) != 2 { - t.Fatal("Decoding produced", len(response.Topics), "topics where there were two!") - } - - if response.Topics[0].Err != ErrNoError { - t.Error("Decoding produced invalid topic 0 error.") - } - - if response.Topics[0].Name != "foo" { - t.Error("Decoding produced invalid topic 0 name.") - } - - if len(response.Topics[0].Partitions) != 1 { - t.Fatal("Decoding produced invalid partition count for topic 0.") - } - - if response.Topics[0].Partitions[0].Err != ErrInvalidMessageSize { - t.Error("Decoding produced invalid topic 0 partition 0 error.") - } - - if response.Topics[0].Partitions[0].ID != 0x01 { - t.Error("Decoding produced invalid topic 0 partition 0 id.") - } - - if response.Topics[0].Partitions[0].Leader != 0x07 { - t.Error("Decoding produced invalid topic 0 partition 0 leader.") - } - - if len(response.Topics[0].Partitions[0].Replicas) != 3 { - t.Fatal("Decoding produced invalid topic 0 partition 0 replicas.") - } - for i := 0; i < 3; i++ { - if response.Topics[0].Partitions[0].Replicas[i] != int32(i+1) { - t.Error("Decoding produced invalid topic 0 partition 0 replica", i) - } - } - - if len(response.Topics[0].Partitions[0].Isr) != 0 { - t.Error("Decoding produced invalid topic 0 partition 0 isr length.") - } - - if response.Topics[1].Err != ErrNoError { - t.Error("Decoding produced invalid topic 1 error.") - } - - if response.Topics[1].Name != "bar" { - t.Error("Decoding produced invalid topic 0 name.") - } - - if len(response.Topics[1].Partitions) != 0 { - t.Error("Decoding produced invalid partition count for topic 1.") - } -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mockbroker_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/mockbroker_test.go deleted file mode 100644 index 987697380..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/mockbroker_test.go +++ /dev/null @@ -1,273 +0,0 @@ -package sarama - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "net" - "reflect" - "strconv" - "sync" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" -) - -const ( - expectationTimeout = 500 * time.Millisecond -) - -type requestHandlerFunc func(req *request) (res encoder) - -// mockBroker is a mock Kafka broker. It consists of a TCP server on a -// kernel-selected localhost port that can accept many connections. It reads -// Kafka requests from that connection and passes them to the user specified -// handler function (see SetHandler) that generates respective responses. If -// the handler has not been explicitly specified then the broker returns -// responses set by the Returns function in the exact order they were provided. -// (if a response has a len of 0, nothing is sent, and the client request will -// timeout in this case). -// -// When running tests with one of these, it is strongly recommended to specify -// a timeout to `go test` so that if the broker hangs waiting for a response, -// the test panics. 
-// -// It is not necessary to prefix message length or correlation ID to your -// response bytes, the server does that automatically as a convenience. -type mockBroker struct { - brokerID int32 - port int32 - closing chan none - stopper chan none - expectations chan encoder - listener net.Listener - t *testing.T - latency time.Duration - handler requestHandlerFunc - history []RequestResponse - lock sync.Mutex -} - -type RequestResponse struct { - Request requestBody - Response encoder -} - -func (b *mockBroker) SetLatency(latency time.Duration) { - b.latency = latency -} - -// SetHandler sets the specified function as the request handler. Whenever -// a mock broker reads a request from the wire it passes the request to the -// function and sends back whatever the handler function returns. -func (b *mockBroker) SetHandler(handler requestHandlerFunc) { - b.lock.Lock() - b.handler = handler - b.lock.Unlock() -} - -func (b *mockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) { - b.SetHandler(func(req *request) (res encoder) { - reqTypeName := reflect.TypeOf(req.body).Elem().Name() - mockResponse := handlerMap[reqTypeName] - if mockResponse == nil { - return nil - } - return mockResponse.For(req.body) - }) -} - -func (b *mockBroker) BrokerID() int32 { - return b.brokerID -} - -func (b *mockBroker) History() []RequestResponse { - b.lock.Lock() - history := make([]RequestResponse, len(b.history)) - copy(history, b.history) - b.lock.Unlock() - return history -} - -func (b *mockBroker) Port() int32 { - return b.port -} - -func (b *mockBroker) Addr() string { - return b.listener.Addr().String() -} - -func (b *mockBroker) Close() { - close(b.expectations) - if len(b.expectations) > 0 { - buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! 
Still waiting on:\n", b.BrokerID())) - for e := range b.expectations { - _, _ = buf.WriteString(spew.Sdump(e)) - } - b.t.Error(buf.String()) - } - close(b.closing) - <-b.stopper -} - -func (b *mockBroker) serverLoop() { - defer close(b.stopper) - var err error - var conn net.Conn - - go func() { - <-b.closing - safeClose(b.t, b.listener) - }() - - wg := &sync.WaitGroup{} - i := 0 - for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() { - wg.Add(1) - go b.handleRequests(conn, i, wg) - i++ - } - wg.Wait() - Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err) -} - -func (b *mockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) { - defer wg.Done() - defer func() { - _ = conn.Close() - }() - Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx) - var err error - - abort := make(chan none) - defer close(abort) - go func() { - select { - case <-b.closing: - _ = conn.Close() - case <-abort: - } - }() - - resHeader := make([]byte, 8) - for { - req, err := decodeRequest(conn) - if err != nil { - Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req)) - b.serverError(err) - break - } - - if b.latency > 0 { - time.Sleep(b.latency) - } - - b.lock.Lock() - res := b.handler(req) - b.history = append(b.history, RequestResponse{req.body, res}) - b.lock.Unlock() - - if res == nil { - Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req)) - continue - } - Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res) - - encodedRes, err := encode(res) - if err != nil { - b.serverError(err) - break - } - if len(encodedRes) == 0 { - continue - } - - binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4)) - binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID)) - if _, err = conn.Write(resHeader); err != nil { - b.serverError(err) - break - } - if _, err = conn.Write(encodedRes); err != nil { - b.serverError(err) - break - } - } - Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err) -} - -func (b *mockBroker) defaultRequestHandler(req *request) (res encoder) { - select { - case res, ok := <-b.expectations: - if !ok { - return nil - } - return res - case <-time.After(expectationTimeout): - return nil - } -} - -func (b *mockBroker) serverError(err error) { - isConnectionClosedError := false - if _, ok := err.(*net.OpError); ok { - isConnectionClosedError = true - } else if err == io.EOF { - isConnectionClosedError = true - } else if err.Error() == "use of closed network connection" { - isConnectionClosedError = true - } - - if isConnectionClosedError { - return - } - - b.t.Errorf(err.Error()) -} - -// newMockBroker launches a fake Kafka broker. It takes a *testing.T as provided by the -// test framework and a channel of responses to use. If an error occurs it is -// simply logged to the *testing.T and the broker exits. -func newMockBroker(t *testing.T, brokerID int32) *mockBroker { - return newMockBrokerAddr(t, brokerID, "localhost:0") -} - -// newMockBrokerAddr behaves like newMockBroker but listens on the address you give -// it rather than just some ephemeral port. 
-func newMockBrokerAddr(t *testing.T, brokerID int32, addr string) *mockBroker { - var err error - - broker := &mockBroker{ - closing: make(chan none), - stopper: make(chan none), - t: t, - brokerID: brokerID, - expectations: make(chan encoder, 512), - } - broker.handler = broker.defaultRequestHandler - - broker.listener, err = net.Listen("tcp", addr) - if err != nil { - t.Fatal(err) - } - Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String()) - _, portStr, err := net.SplitHostPort(broker.listener.Addr().String()) - if err != nil { - t.Fatal(err) - } - tmp, err := strconv.ParseInt(portStr, 10, 32) - if err != nil { - t.Fatal(err) - } - broker.port = int32(tmp) - - go broker.serverLoop() - - return broker -} - -func (b *mockBroker) Returns(e encoder) { - b.expectations <- e -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mockresponses_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/mockresponses_test.go deleted file mode 100644 index 655d9fb3b..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/mockresponses_test.go +++ /dev/null @@ -1,411 +0,0 @@ -package sarama - -import ( - "testing" -) - -// MockResponse is a response builder interface it defines one method that -// allows generating a response based on a request body. -type MockResponse interface { - For(reqBody decoder) (res encoder) -} - -type mockWrapper struct { - res encoder -} - -func (mw *mockWrapper) For(reqBody decoder) (res encoder) { - return mw.res -} - -func newMockWrapper(res encoder) *mockWrapper { - return &mockWrapper{res: res} -} - -// mockMetadataResponse is a `MetadataResponse` builder. -type mockMetadataResponse struct { - leaders map[string]map[int32]int32 - brokers map[string]int32 - t *testing.T -} - -func newMockMetadataResponse(t *testing.T) *mockMetadataResponse { - return &mockMetadataResponse{ - leaders: make(map[string]map[int32]int32), - brokers: make(map[string]int32), - t: t, - } -} - -func (mmr *mockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *mockMetadataResponse { - partitions := mmr.leaders[topic] - if partitions == nil { - partitions = make(map[int32]int32) - mmr.leaders[topic] = partitions - } - partitions[partition] = brokerID - return mmr -} - -func (mmr *mockMetadataResponse) SetBroker(addr string, brokerID int32) *mockMetadataResponse { - mmr.brokers[addr] = brokerID - return mmr -} - -func (mor *mockMetadataResponse) For(reqBody decoder) encoder { - metadataRequest := reqBody.(*MetadataRequest) - metadataResponse := &MetadataResponse{} - for addr, brokerID := range mor.brokers { - metadataResponse.AddBroker(addr, brokerID) - } - if len(metadataRequest.Topics) == 0 { - for topic, partitions := range mor.leaders { - for partition, brokerID := range partitions { - metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) - } - } - return metadataResponse - } - for _, topic := range metadataRequest.Topics { - for partition, brokerID := range mor.leaders[topic] { - metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) - } - } - return metadataResponse -} - -// mockOffsetResponse is an `OffsetResponse` builder. 
-type mockOffsetResponse struct { - offsets map[string]map[int32]map[int64]int64 - t *testing.T -} - -func newMockOffsetResponse(t *testing.T) *mockOffsetResponse { - return &mockOffsetResponse{ - offsets: make(map[string]map[int32]map[int64]int64), - t: t, - } -} - -func (mor *mockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *mockOffsetResponse { - partitions := mor.offsets[topic] - if partitions == nil { - partitions = make(map[int32]map[int64]int64) - mor.offsets[topic] = partitions - } - times := partitions[partition] - if times == nil { - times = make(map[int64]int64) - partitions[partition] = times - } - times[time] = offset - return mor -} - -func (mor *mockOffsetResponse) For(reqBody decoder) encoder { - offsetRequest := reqBody.(*OffsetRequest) - offsetResponse := &OffsetResponse{} - for topic, partitions := range offsetRequest.blocks { - for partition, block := range partitions { - offset := mor.getOffset(topic, partition, block.time) - offsetResponse.AddTopicPartition(topic, partition, offset) - } - } - return offsetResponse -} - -func (mor *mockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 { - partitions := mor.offsets[topic] - if partitions == nil { - mor.t.Errorf("missing topic: %s", topic) - } - times := partitions[partition] - if times == nil { - mor.t.Errorf("missing partition: %d", partition) - } - offset, ok := times[time] - if !ok { - mor.t.Errorf("missing time: %d", time) - } - return offset -} - -// mockFetchResponse is a `FetchResponse` builder. -type mockFetchResponse struct { - messages map[string]map[int32]map[int64]Encoder - highWaterMarks map[string]map[int32]int64 - t *testing.T - batchSize int -} - -func newMockFetchResponse(t *testing.T, batchSize int) *mockFetchResponse { - return &mockFetchResponse{ - messages: make(map[string]map[int32]map[int64]Encoder), - highWaterMarks: make(map[string]map[int32]int64), - t: t, - batchSize: batchSize, - } -} - -func (mfr *mockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *mockFetchResponse { - partitions := mfr.messages[topic] - if partitions == nil { - partitions = make(map[int32]map[int64]Encoder) - mfr.messages[topic] = partitions - } - messages := partitions[partition] - if messages == nil { - messages = make(map[int64]Encoder) - partitions[partition] = messages - } - messages[offset] = msg - return mfr -} - -func (mfr *mockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *mockFetchResponse { - partitions := mfr.highWaterMarks[topic] - if partitions == nil { - partitions = make(map[int32]int64) - mfr.highWaterMarks[topic] = partitions - } - partitions[partition] = offset - return mfr -} - -func (mfr *mockFetchResponse) For(reqBody decoder) encoder { - fetchRequest := reqBody.(*FetchRequest) - res := &FetchResponse{} - for topic, partitions := range fetchRequest.blocks { - for partition, block := range partitions { - initialOffset := block.fetchOffset - offset := initialOffset - maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition)) - for i := 0; i < mfr.batchSize && offset < maxOffset; { - msg := mfr.getMessage(topic, partition, offset) - if msg != nil { - res.AddMessage(topic, partition, nil, msg, offset) - i++ - } - offset++ - } - fb := res.GetBlock(topic, partition) - if fb == nil { - res.AddError(topic, partition, ErrNoError) - fb = res.GetBlock(topic, partition) - } - fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition) - } - } - return res -} - -func (mfr 
*mockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder { - partitions := mfr.messages[topic] - if partitions == nil { - return nil - } - messages := partitions[partition] - if messages == nil { - return nil - } - return messages[offset] -} - -func (mfr *mockFetchResponse) getMessageCount(topic string, partition int32) int { - partitions := mfr.messages[topic] - if partitions == nil { - return 0 - } - messages := partitions[partition] - if messages == nil { - return 0 - } - return len(messages) -} - -func (mfr *mockFetchResponse) getHighWaterMark(topic string, partition int32) int64 { - partitions := mfr.highWaterMarks[topic] - if partitions == nil { - return 0 - } - return partitions[partition] -} - -// mockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder. -type mockConsumerMetadataResponse struct { - coordinators map[string]interface{} - t *testing.T -} - -func newMockConsumerMetadataResponse(t *testing.T) *mockConsumerMetadataResponse { - return &mockConsumerMetadataResponse{ - coordinators: make(map[string]interface{}), - t: t, - } -} - -func (mr *mockConsumerMetadataResponse) SetCoordinator(group string, broker *mockBroker) *mockConsumerMetadataResponse { - mr.coordinators[group] = broker - return mr -} - -func (mr *mockConsumerMetadataResponse) SetError(group string, kerror KError) *mockConsumerMetadataResponse { - mr.coordinators[group] = kerror - return mr -} - -func (mr *mockConsumerMetadataResponse) For(reqBody decoder) encoder { - req := reqBody.(*ConsumerMetadataRequest) - group := req.ConsumerGroup - res := &ConsumerMetadataResponse{} - v := mr.coordinators[group] - switch v := v.(type) { - case *mockBroker: - res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()} - case KError: - res.Err = v - } - return res -} - -// mockOffsetCommitResponse is a `OffsetCommitResponse` builder. -type mockOffsetCommitResponse struct { - errors map[string]map[string]map[int32]KError - t *testing.T -} - -func newMockOffsetCommitResponse(t *testing.T) *mockOffsetCommitResponse { - return &mockOffsetCommitResponse{t: t} -} - -func (mr *mockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *mockOffsetCommitResponse { - if mr.errors == nil { - mr.errors = make(map[string]map[string]map[int32]KError) - } - topics := mr.errors[group] - if topics == nil { - topics = make(map[string]map[int32]KError) - mr.errors[group] = topics - } - partitions := topics[topic] - if partitions == nil { - partitions = make(map[int32]KError) - topics[topic] = partitions - } - partitions[partition] = kerror - return mr -} - -func (mr *mockOffsetCommitResponse) For(reqBody decoder) encoder { - req := reqBody.(*OffsetCommitRequest) - group := req.ConsumerGroup - res := &OffsetCommitResponse{} - for topic, partitions := range req.blocks { - for partition := range partitions { - res.AddError(topic, partition, mr.getError(group, topic, partition)) - } - } - return res -} - -func (mr *mockOffsetCommitResponse) getError(group, topic string, partition int32) KError { - topics := mr.errors[group] - if topics == nil { - return ErrNoError - } - partitions := topics[topic] - if partitions == nil { - return ErrNoError - } - kerror, ok := partitions[partition] - if !ok { - return ErrNoError - } - return kerror -} - -// mockProduceResponse is a `ProduceResponse` builder. 
-type mockProduceResponse struct { - errors map[string]map[int32]KError - t *testing.T -} - -func newMockProduceResponse(t *testing.T) *mockProduceResponse { - return &mockProduceResponse{t: t} -} - -func (mr *mockProduceResponse) SetError(topic string, partition int32, kerror KError) *mockProduceResponse { - if mr.errors == nil { - mr.errors = make(map[string]map[int32]KError) - } - partitions := mr.errors[topic] - if partitions == nil { - partitions = make(map[int32]KError) - mr.errors[topic] = partitions - } - partitions[partition] = kerror - return mr -} - -func (mr *mockProduceResponse) For(reqBody decoder) encoder { - req := reqBody.(*ProduceRequest) - res := &ProduceResponse{} - for topic, partitions := range req.msgSets { - for partition := range partitions { - res.AddTopicPartition(topic, partition, mr.getError(topic, partition)) - } - } - return res -} - -func (mr *mockProduceResponse) getError(topic string, partition int32) KError { - partitions := mr.errors[topic] - if partitions == nil { - return ErrNoError - } - kerror, ok := partitions[partition] - if !ok { - return ErrNoError - } - return kerror -} - -// mockOffsetFetchResponse is a `OffsetFetchResponse` builder. -type mockOffsetFetchResponse struct { - offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock - t *testing.T -} - -func newMockOffsetFetchResponse(t *testing.T) *mockOffsetFetchResponse { - return &mockOffsetFetchResponse{t: t} -} - -func (mr *mockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *mockOffsetFetchResponse { - if mr.offsets == nil { - mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock) - } - topics := mr.offsets[group] - if topics == nil { - topics = make(map[string]map[int32]*OffsetFetchResponseBlock) - mr.offsets[group] = topics - } - partitions := topics[topic] - if partitions == nil { - partitions = make(map[int32]*OffsetFetchResponseBlock) - topics[topic] = partitions - } - partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror} - return mr -} - -func (mr *mockOffsetFetchResponse) For(reqBody decoder) encoder { - req := reqBody.(*OffsetFetchRequest) - group := req.ConsumerGroup - res := &OffsetFetchResponse{} - for topic, partitions := range mr.offsets[group] { - for partition, block := range partitions { - res.AddBlock(topic, partition, block) - } - } - return res -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/README.md deleted file mode 100644 index 55a6c2e61..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# sarama/mocks - -The `mocks` subpackage includes mock implementations that implement the interfaces of the major sarama types. -You can use them to test your sarama applications using dependency injection. - -The following mock objects are available: - -- [Consumer](https://godoc.org/github.com/Shopify/sarama/mocks#Consumer), which will create [PartitionConsumer](https://godoc.org/github.com/Shopify/sarama/mocks#PartitionConsumer) mocks. -- [AsyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#AsyncProducer) -- [SyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#SyncProducer) - -The mocks allow you to set expectations on them. When you close the mocks, the expectations will be verified, -and the results will be reported to the `*testing.T` object you provided when creating the mock. 
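As an illustration (not taken from the package documentation), a test against the mock `SyncProducer` might look like the following sketch; the test name, topic, and payload are hypothetical:

```go
package myapp

import (
	"testing"

	"github.com/Shopify/sarama"
	"github.com/Shopify/sarama/mocks"
)

// TestSendGreeting sketches the expectation workflow: set expectations
// first, then exercise the code under test, then Close the mock so that
// any unmet expectations are reported to the *testing.T.
func TestSendGreeting(t *testing.T) {
	sp := mocks.NewSyncProducer(t, nil)

	// One expectation per SendMessage call we intend to make.
	sp.ExpectSendMessageAndSucceed()

	msg := &sarama.ProducerMessage{
		Topic: "greetings", // hypothetical topic
		Value: sarama.StringEncoder("hello world"),
	}
	if _, _, err := sp.SendMessage(msg); err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	// Closing verifies that no expectations were left unconsumed.
	if err := sp.Close(); err != nil {
		t.Error(err)
	}
}
```

The same set-expectations-then-Close pattern applies to the AsyncProducer and Consumer mocks.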
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/async_producer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/async_producer.go deleted file mode 100644 index 6ccf1f145..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/async_producer.go +++ /dev/null @@ -1,142 +0,0 @@ -package mocks - -import ( - "sync" - - "github.com/Shopify/sarama" -) - -// AsyncProducer implements sarama's Producer interface for testing purposes. -// Before you can send messages to its Input channel, you have to set expectations -// so it knows how to handle the input. This way you can easily test success and -// failure scenarios. -type AsyncProducer struct { - l sync.Mutex - t ErrorReporter - expectations []*producerExpectation - closed chan struct{} - input chan *sarama.ProducerMessage - successes chan *sarama.ProducerMessage - errors chan *sarama.ProducerError - lastOffset int64 -} - -// NewAsyncProducer instantiates a new Producer mock. The t argument should -// be the *testing.T instance of your test method. An error will be written to it if -// an expectation is violated. The config argument is used to determine whether it -// should ack successes on the Successes channel. -func NewAsyncProducer(t ErrorReporter, config *sarama.Config) *AsyncProducer { - if config == nil { - config = sarama.NewConfig() - } - mp := &AsyncProducer{ - t: t, - closed: make(chan struct{}), - expectations: make([]*producerExpectation, 0), - input: make(chan *sarama.ProducerMessage, config.ChannelBufferSize), - successes: make(chan *sarama.ProducerMessage, config.ChannelBufferSize), - errors: make(chan *sarama.ProducerError, config.ChannelBufferSize), - } - - go func() { - defer func() { - close(mp.successes) - close(mp.errors) - }() - - for msg := range mp.input { - mp.l.Lock() - if len(mp.expectations) == 0 { - mp.t.Errorf("No more expectations set on this mock producer to handle the input message.") - } else { - expectation := mp.expectations[0] - mp.expectations = mp.expectations[1:] - if expectation.Result == errProduceSuccess { - mp.lastOffset++ - if config.Producer.Return.Successes { - msg.Offset = mp.lastOffset - mp.successes <- msg - } - } else { - if config.Producer.Return.Errors { - mp.errors <- &sarama.ProducerError{Err: expectation.Result, Msg: msg} - } - } - } - mp.l.Unlock() - } - - mp.l.Lock() - if len(mp.expectations) > 0 { - mp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(mp.expectations)) - } - mp.l.Unlock() - - close(mp.closed) - }() - - return mp -} - -//////////////////////////////////////////////// -// Implement Producer interface -//////////////////////////////////////////////// - -// AsyncClose corresponds with the AsyncClose method of sarama's Producer implementation. -// By closing a mock producer, you also tell it that no more input will be provided, so it will -// write an error to the test state if there are any remaining expectations. -func (mp *AsyncProducer) AsyncClose() { - close(mp.input) -} - -// Close corresponds with the Close method of sarama's Producer implementation. -// By closing a mock producer, you also tell it that no more input will be provided, so it will -// write an error to the test state if there are any remaining expectations. -func (mp *AsyncProducer) Close() error { - mp.AsyncClose() - <-mp.closed - return nil -} - -// Input corresponds with the Input method of sarama's Producer implementation.
-// You have to set expectations on the mock producer before writing messages to the Input -// channel, so it knows how to handle them. If there are no remaining expectations and -// a message is written to the Input channel, the mock producer will write an error to the test -// state object. -func (mp *AsyncProducer) Input() chan<- *sarama.ProducerMessage { - return mp.input -} - -// Successes corresponds with the Successes method of sarama's Producer implementation. -func (mp *AsyncProducer) Successes() <-chan *sarama.ProducerMessage { - return mp.successes -} - -// Errors corresponds with the Errors method of sarama's Producer implementation. -func (mp *AsyncProducer) Errors() <-chan *sarama.ProducerError { - return mp.errors -} - -//////////////////////////////////////////////// -// Setting expectations -//////////////////////////////////////////////// - -// ExpectInputAndSucceed sets an expectation on the mock producer that a message will be provided -// on the input channel. The mock producer will handle the message as if it is produced successfully, -// i.e. it will make it available on the Successes channel if the Producer.Return.Successes setting -// is set to true. -func (mp *AsyncProducer) ExpectInputAndSucceed() { - mp.l.Lock() - defer mp.l.Unlock() - mp.expectations = append(mp.expectations, &producerExpectation{Result: errProduceSuccess}) -} - -// ExpectInputAndFail sets an expectation on the mock producer that a message will be provided -// on the input channel. The mock producer will handle the message as if it failed to produce -// successfully. This means it will make a ProducerError available on the Errors channel. -func (mp *AsyncProducer) ExpectInputAndFail(err error) { - mp.l.Lock() - defer mp.l.Unlock() - mp.expectations = append(mp.expectations, &producerExpectation{Result: err}) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/async_producer_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/async_producer_test.go deleted file mode 100644 index 520bf58b9..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/async_producer_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package mocks - -import ( - "fmt" - "testing" - - "github.com/Shopify/sarama" -) - -type testReporterMock struct { - errors []string -} - -func newTestReporterMock() *testReporterMock { - return &testReporterMock{errors: make([]string, 0)} -} - -func (trm *testReporterMock) Errorf(format string, args ...interface{}) { - trm.errors = append(trm.errors, fmt.Sprintf(format, args...)) -} - -func TestMockAsyncProducerImplementsAsyncProducerInterface(t *testing.T) { - var mp interface{} = &AsyncProducer{} - if _, ok := mp.(sarama.AsyncProducer); !ok { - t.Error("The mock producer should implement the sarama.AsyncProducer interface.") - } -} - -func TestProducerReturnsExpectationsToChannels(t *testing.T) { - config := sarama.NewConfig() - config.Producer.Return.Successes = true - mp := NewAsyncProducer(t, config) - - mp.ExpectInputAndSucceed() - mp.ExpectInputAndSucceed() - mp.ExpectInputAndFail(sarama.ErrOutOfBrokers) - - mp.Input() <- &sarama.ProducerMessage{Topic: "test 1"} - mp.Input() <- &sarama.ProducerMessage{Topic: "test 2"} - mp.Input() <- &sarama.ProducerMessage{Topic: "test 3"} - - msg1 := <-mp.Successes() - msg2 := <-mp.Successes() - err1 := <-mp.Errors() - - if msg1.Topic != "test 1" { - t.Error("Expected message 1 to be returned first") - } - - if msg2.Topic != "test 2" { - t.Error("Expected message 2 to be returned second") - } - - if err1.Msg.Topic
!= "test 3" || err1.Err != sarama.ErrOutOfBrokers { - t.Error("Expected message 3 to be returned as error") - } - - if err := mp.Close(); err != nil { - t.Error(err) - } -} - -func TestProducerWithTooFewExpectations(t *testing.T) { - trm := newTestReporterMock() - mp := NewAsyncProducer(trm, nil) - mp.ExpectInputAndSucceed() - - mp.Input() <- &sarama.ProducerMessage{Topic: "test"} - mp.Input() <- &sarama.ProducerMessage{Topic: "test"} - - if err := mp.Close(); err != nil { - t.Error(err) - } - - if len(trm.errors) != 1 { - t.Error("Expected to report an error") - } -} - -func TestProducerWithTooManyExpectations(t *testing.T) { - trm := newTestReporterMock() - mp := NewAsyncProducer(trm, nil) - mp.ExpectInputAndSucceed() - mp.ExpectInputAndFail(sarama.ErrOutOfBrokers) - - mp.Input() <- &sarama.ProducerMessage{Topic: "test"} - if err := mp.Close(); err != nil { - t.Error(err) - } - - if len(trm.errors) != 1 { - t.Error("Expected to report an error") - } -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/consumer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/consumer.go deleted file mode 100644 index acf0894ee..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/consumer.go +++ /dev/null @@ -1,316 +0,0 @@ -package mocks - -import ( - "sync" - "sync/atomic" - - "github.com/Shopify/sarama" -) - -// Consumer implements sarama's Consumer interface for testing purposes. -// Before you can start consuming from this consumer, you have to register -// topic/partitions using ExpectConsumePartition, and set expectations on them. -type Consumer struct { - l sync.Mutex - t ErrorReporter - config *sarama.Config - partitionConsumers map[string]map[int32]*PartitionConsumer - metadata map[string][]int32 -} - -// NewConsumer returns a new mock Consumer instance. The t argument should -// be the *testing.T instance of your test method. An error will be written to it if -// an expectation is violated. The config argument is currently unused and can be set to nil. -func NewConsumer(t ErrorReporter, config *sarama.Config) *Consumer { - if config == nil { - config = sarama.NewConfig() - } - - c := &Consumer{ - t: t, - config: config, - partitionConsumers: make(map[string]map[int32]*PartitionConsumer), - } - return c -} - -/////////////////////////////////////////////////// -// Consumer interface implementation -/////////////////////////////////////////////////// - -// ConsumePartition implements the ConsumePartition method from the sarama.Consumer interface. -// Before you can start consuming a partition, you have to set expectations on it using -// ExpectConsumePartition. You can only consume a partition once per consumer. -func (c *Consumer) ConsumePartition(topic string, partition int32, offset int64) (sarama.PartitionConsumer, error) { - c.l.Lock() - defer c.l.Unlock() - - if c.partitionConsumers[topic] == nil || c.partitionConsumers[topic][partition] == nil { - c.t.Errorf("No expectations set for %s/%d", topic, partition) - return nil, errOutOfExpectations - } - - pc := c.partitionConsumers[topic][partition] - if pc.consumed { - return nil, sarama.ConfigurationError("The topic/partition is already being consumed") - } - - if pc.offset != AnyOffset && pc.offset != offset { - c.t.Errorf("Unexpected offset when calling ConsumePartition for %s/%d. 
Expected %d, got %d.", topic, partition, pc.offset, offset) - } - - pc.consumed = true - go pc.handleExpectations() - return pc, nil -} - -// Topics returns a list of topics, as registered with SetTopicMetadata -func (c *Consumer) Topics() ([]string, error) { - c.l.Lock() - defer c.l.Unlock() - - if c.metadata == nil { - c.t.Errorf("Unexpected call to Topics. Initialize the mock's topic metadata with SetTopicMetadata.") - return nil, sarama.ErrOutOfBrokers - } - - var result []string - for topic := range c.metadata { - result = append(result, topic) - } - return result, nil -} - -// Partitions returns the list of partitions for the given topic, as registered with SetTopicMetadata -func (c *Consumer) Partitions(topic string) ([]int32, error) { - c.l.Lock() - defer c.l.Unlock() - - if c.metadata == nil { - c.t.Errorf("Unexpected call to Partitions. Initialize the mock's topic metadata with SetTopicMetadata.") - return nil, sarama.ErrOutOfBrokers - } - if c.metadata[topic] == nil { - return nil, sarama.ErrUnknownTopicOrPartition - } - - return c.metadata[topic], nil -} - -// Close implements the Close method from the sarama.Consumer interface. It will close -// all registered PartitionConsumer instances. -func (c *Consumer) Close() error { - c.l.Lock() - defer c.l.Unlock() - - for _, partitions := range c.partitionConsumers { - for _, partitionConsumer := range partitions { - _ = partitionConsumer.Close() - } - } - - return nil -} - -/////////////////////////////////////////////////// -// Expectation API -/////////////////////////////////////////////////// - -// SetTopicMetadata sets the cluster's topic/partition metadata, -// which will be returned by Topics() and Partitions(). -func (c *Consumer) SetTopicMetadata(metadata map[string][]int32) { - c.l.Lock() - defer c.l.Unlock() - - c.metadata = metadata -} - -// ExpectConsumePartition will register a topic/partition, so you can set expectations on it. -// The registered PartitionConsumer will be returned, so you can set expectations -// on it using method chaining. Once a topic/partition is registered, you are -// expected to start consuming it using ConsumePartition. If that doesn't happen, -// an error will be written to the error reporter once the mock consumer is closed. -func (c *Consumer) ExpectConsumePartition(topic string, partition int32, offset int64) *PartitionConsumer { - c.l.Lock() - defer c.l.Unlock() - - if c.partitionConsumers[topic] == nil { - c.partitionConsumers[topic] = make(map[int32]*PartitionConsumer) - } - - if c.partitionConsumers[topic][partition] == nil { - c.partitionConsumers[topic][partition] = &PartitionConsumer{ - t: c.t, - topic: topic, - partition: partition, - offset: offset, - expectations: make(chan *consumerExpectation, 1000), - messages: make(chan *sarama.ConsumerMessage, c.config.ChannelBufferSize), - errors: make(chan *sarama.ConsumerError, c.config.ChannelBufferSize), - } - } - - return c.partitionConsumers[topic][partition] -} - -/////////////////////////////////////////////////// -// PartitionConsumer mock type -/////////////////////////////////////////////////// - -// PartitionConsumer implements sarama's PartitionConsumer interface for testing purposes. -// It is returned by the mock Consumer's ConsumePartition method, but only if it is -// registered first using the Consumer's ExpectConsumePartition method. Before consuming the -// Errors and Messages channels, you should specify what values will be provided on these -// channels using YieldMessage and YieldError.
-type PartitionConsumer struct { - l sync.Mutex - t ErrorReporter - topic string - partition int32 - offset int64 - expectations chan *consumerExpectation - messages chan *sarama.ConsumerMessage - errors chan *sarama.ConsumerError - singleClose sync.Once - consumed bool - errorsShouldBeDrained bool - messagesShouldBeDrained bool - highWaterMarkOffset int64 -} - -func (pc *PartitionConsumer) handleExpectations() { - pc.l.Lock() - defer pc.l.Unlock() - - for ex := range pc.expectations { - if ex.Err != nil { - pc.errors <- &sarama.ConsumerError{ - Topic: pc.topic, - Partition: pc.partition, - Err: ex.Err, - } - } else { - atomic.AddInt64(&pc.highWaterMarkOffset, 1) - - ex.Msg.Topic = pc.topic - ex.Msg.Partition = pc.partition - ex.Msg.Offset = atomic.LoadInt64(&pc.highWaterMarkOffset) - - pc.messages <- ex.Msg - } - } - - close(pc.messages) - close(pc.errors) -} - -/////////////////////////////////////////////////// -// PartitionConsumer interface implementation -/////////////////////////////////////////////////// - -// AsyncClose implements the AsyncClose method from the sarama.PartitionConsumer interface. -func (pc *PartitionConsumer) AsyncClose() { - pc.singleClose.Do(func() { - close(pc.expectations) - }) -} - -// Close implements the Close method from the sarama.PartitionConsumer interface. It will -// verify whether the partition consumer was actually started. -func (pc *PartitionConsumer) Close() error { - if !pc.consumed { - pc.t.Errorf("Expectations set on %s/%d, but no partition consumer was started.", pc.topic, pc.partition) - return errPartitionConsumerNotStarted - } - - if pc.errorsShouldBeDrained && len(pc.errors) > 0 { - pc.t.Errorf("Expected the errors channel for %s/%d to be drained on close, but found %d errors.", pc.topic, pc.partition, len(pc.errors)) - } - - if pc.messagesShouldBeDrained && len(pc.messages) > 0 { - pc.t.Errorf("Expected the messages channel for %s/%d to be drained on close, but found %d messages.", pc.topic, pc.partition, len(pc.messages)) - } - - pc.AsyncClose() - - var ( - closeErr error - wg sync.WaitGroup - ) - - wg.Add(1) - go func() { - defer wg.Done() - - var errs = make(sarama.ConsumerErrors, 0) - for err := range pc.errors { - errs = append(errs, err) - } - - if len(errs) > 0 { - closeErr = errs - } - }() - - wg.Add(1) - go func() { - defer wg.Done() - for range pc.messages { - // drain - } - }() - - wg.Wait() - return closeErr -} - -// Errors implements the Errors method from the sarama.PartitionConsumer interface. -func (pc *PartitionConsumer) Errors() <-chan *sarama.ConsumerError { - return pc.errors -} - -// Messages implements the Messages method from the sarama.PartitionConsumer interface. -func (pc *PartitionConsumer) Messages() <-chan *sarama.ConsumerMessage { - return pc.messages -} - -func (pc *PartitionConsumer) HighWaterMarkOffset() int64 { - return atomic.LoadInt64(&pc.highWaterMarkOffset) + 1 -} - -/////////////////////////////////////////////////// -// Expectation API -/////////////////////////////////////////////////// - -// YieldMessage will yield a message on the Messages channel of this partition consumer -// when it is consumed. By default, the mock consumer will not verify whether this -// message was consumed from the Messages channel, because there are legitimate -// reasons for this not to happen. You can call ExpectMessagesDrainedOnClose so it will -// verify that the channel is empty on close.
-func (pc *PartitionConsumer) YieldMessage(msg *sarama.ConsumerMessage) { - pc.expectations <- &consumerExpectation{Msg: msg} -} - -// YieldError will yield an error on the Errors channel of this partition consumer -// when it is consumed. By default, the mock consumer will not verify whether this error was -// consumed from the Errors channel, because there are legitimate reasons for this -// not to happen. You can call ExpectErrorsDrainedOnClose so it will verify that -// the channel is empty on close. -func (pc *PartitionConsumer) YieldError(err error) { - pc.expectations <- &consumerExpectation{Err: err} -} - -// ExpectMessagesDrainedOnClose sets an expectation on the partition consumer -// that the messages channel will be fully drained when Close is called. If this -// expectation is not met, an error is reported to the error reporter. -func (pc *PartitionConsumer) ExpectMessagesDrainedOnClose() { - pc.messagesShouldBeDrained = true -} - -// ExpectErrorsDrainedOnClose sets an expectation on the partition consumer -// that the errors channel will be fully drained when Close is called. If this -// expectation is not met, an error is reported to the error reporter. -func (pc *PartitionConsumer) ExpectErrorsDrainedOnClose() { - pc.errorsShouldBeDrained = true -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/consumer_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/consumer_test.go deleted file mode 100644 index 50dad3a69..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/consumer_test.go +++ /dev/null @@ -1,249 +0,0 @@ -package mocks - -import ( - "sort" - "testing" - - "github.com/Shopify/sarama" -) - -func TestMockConsumerImplementsConsumerInterface(t *testing.T) { - var c interface{} = &Consumer{} - if _, ok := c.(sarama.Consumer); !ok { - t.Error("The mock consumer should implement the sarama.Consumer interface.") - } - - var pc interface{} = &PartitionConsumer{} - if _, ok := pc.(sarama.PartitionConsumer); !ok { - t.Error("The mock partitionconsumer should implement the sarama.PartitionConsumer interface.") - } -} - -func TestConsumerHandlesExpectations(t *testing.T) { - consumer := NewConsumer(t, nil) - defer func() { - if err := consumer.Close(); err != nil { - t.Error(err) - } - }() - - consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")}) - consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers) - consumer.ExpectConsumePartition("test", 1, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world again")}) - consumer.ExpectConsumePartition("other", 0, AnyOffset).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello other")}) - - pc_test0, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest) - if err != nil { - t.Fatal(err) - } - test0_msg := <-pc_test0.Messages() - if test0_msg.Topic != "test" || test0_msg.Partition != 0 || string(test0_msg.Value) != "hello world" { - t.Error("Message was not as expected:", test0_msg) - } - test0_err := <-pc_test0.Errors() - if test0_err.Err != sarama.ErrOutOfBrokers { - t.Error("Expected sarama.ErrOutOfBrokers, found:", test0_err.Err) - } - - pc_test1, err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest) - if err != nil { - t.Fatal(err) - } - test1_msg := <-pc_test1.Messages() - if test1_msg.Topic != "test" || test1_msg.Partition != 1 || string(test1_msg.Value) != "hello world again" { - t.Error("Message was not as 
expected:", test1_msg) - } - - pc_other0, err := consumer.ConsumePartition("other", 0, sarama.OffsetNewest) - if err != nil { - t.Fatal(err) - } - other0_msg := <-pc_other0.Messages() - if other0_msg.Topic != "other" || other0_msg.Partition != 0 || string(other0_msg.Value) != "hello other" { - t.Error("Message was not as expected:", other0_msg) - } -} - -func TestConsumerReturnsNonconsumedErrorsOnClose(t *testing.T) { - consumer := NewConsumer(t, nil) - consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers) - consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers) - - pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest) - if err != nil { - t.Fatal(err) - } - - select { - case <-pc.Messages(): - t.Error("Did not epxect a message on the messages channel.") - case err := <-pc.Errors(): - if err.Err != sarama.ErrOutOfBrokers { - t.Error("Expected sarama.ErrOutOfBrokers, found", err) - } - } - - errs := pc.Close().(sarama.ConsumerErrors) - if len(errs) != 1 && errs[0].Err != sarama.ErrOutOfBrokers { - t.Error("Expected Close to return the remaining sarama.ErrOutOfBrokers") - } -} - -func TestConsumerWithoutExpectationsOnPartition(t *testing.T) { - trm := newTestReporterMock() - consumer := NewConsumer(trm, nil) - - _, err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest) - if err != errOutOfExpectations { - t.Error("Expected ConsumePartition to return errOutOfExpectations") - } - - if err := consumer.Close(); err != nil { - t.Error("No error expected on close, but found:", err) - } - - if len(trm.errors) != 1 { - t.Errorf("Expected an expectation failure to be set on the error reporter.") - } -} - -func TestConsumerWithExpectationsOnUnconsumedPartition(t *testing.T) { - trm := newTestReporterMock() - consumer := NewConsumer(trm, nil) - consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")}) - - if err := consumer.Close(); err != nil { - t.Error("No error expected on close, but found:", err) - } - - if len(trm.errors) != 1 { - t.Errorf("Expected an expectation failure to be set on the error reporter.") - } -} - -func TestConsumerWithWrongOffsetExpectation(t *testing.T) { - trm := newTestReporterMock() - consumer := NewConsumer(trm, nil) - consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest) - - _, err := consumer.ConsumePartition("test", 0, sarama.OffsetNewest) - if err != nil { - t.Error("Did not expect error, found:", err) - } - - if len(trm.errors) != 1 { - t.Errorf("Expected an expectation failure to be set on the error reporter.") - } - - if err := consumer.Close(); err != nil { - t.Error(err) - } -} - -func TestConsumerViolatesMessagesDrainedExpectation(t *testing.T) { - trm := newTestReporterMock() - consumer := NewConsumer(trm, nil) - pcmock := consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest) - pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")}) - pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")}) - pcmock.ExpectMessagesDrainedOnClose() - - pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest) - if err != nil { - t.Error(err) - } - - // consume first message, not second one - <-pc.Messages() - - if err := consumer.Close(); err != nil { - t.Error(err) - } - - if len(trm.errors) != 1 { - t.Errorf("Expected an expectation failure to be set on the error reporter.") - } -} - -func TestConsumerMeetsErrorsDrainedExpectation(t 
-} - -func TestConsumerMeetsErrorsDrainedExpectation(t *testing.T) { - trm := newTestReporterMock() - consumer := NewConsumer(trm, nil) - - pcmock := consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest) - pcmock.YieldError(sarama.ErrInvalidMessage) - pcmock.YieldError(sarama.ErrInvalidMessage) - pcmock.ExpectErrorsDrainedOnClose() - - pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest) - if err != nil { - t.Error(err) - } - - // consume the first and second error - <-pc.Errors() - <-pc.Errors() - - if err := consumer.Close(); err != nil { - t.Error(err) - } - - if len(trm.errors) != 0 { - t.Errorf("Expected no expectation failures to be set on the error reporter.") - } -} - -func TestConsumerTopicMetadata(t *testing.T) { - trm := newTestReporterMock() - consumer := NewConsumer(trm, nil) - - consumer.SetTopicMetadata(map[string][]int32{ - "test1": []int32{0, 1, 2, 3}, - "test2": []int32{0, 1, 2, 3, 4, 5, 6, 7}, - }) - - topics, err := consumer.Topics() - if err != nil { - t.Error(err) - } - - sortedTopics := sort.StringSlice(topics) - sortedTopics.Sort() - if len(sortedTopics) != 2 || sortedTopics[0] != "test1" || sortedTopics[1] != "test2" { - t.Error("Unexpected topics returned:", sortedTopics) - } - - partitions1, err := consumer.Partitions("test1") - if err != nil { - t.Error(err) - } - - if len(partitions1) != 4 { - t.Error("Unexpected partitions returned:", len(partitions1)) - } - - partitions2, err := consumer.Partitions("test2") - if err != nil { - t.Error(err) - } - - if len(partitions2) != 8 { - t.Error("Unexpected partitions returned:", len(partitions2)) - } - - if len(trm.errors) != 0 { - t.Errorf("Expected no expectation failures to be set on the error reporter.") - } -} - -func TestConsumerUnexpectedTopicMetadata(t *testing.T) { - trm := newTestReporterMock() - consumer := NewConsumer(trm, nil) - - if _, err := consumer.Topics(); err != sarama.ErrOutOfBrokers { - t.Error("Expected sarama.ErrOutOfBrokers, found", err) - } - - if len(trm.errors) != 1 { - t.Errorf("Expected an expectation failure to be set on the error reporter.") - } -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/mocks.go b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/mocks.go deleted file mode 100644 index ab24beebd..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/mocks.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Package mocks provides mocks that can be used for testing applications -that use Sarama. The mock types provided by this package implement the -interfaces Sarama exports, so you can use them for dependency injection -in your tests. - -All mock instances require you to set expectations on them before you -can use them. These expectations determine how the mock will behave. If an -expectation is not met, it will make your test fail. - -NOTE: this package currently does not fall under the API stability -guarantee of Sarama as it is still considered experimental. -*/ -package mocks - -import ( - "errors" - - "github.com/Shopify/sarama" -) -
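To illustrate the dependency-injection pattern described in the package comment above, here is a hedged sketch of how an application test might inject the mock SyncProducer defined later in this package; publishGreeting and package myapp are hypothetical application code, not part of Sarama, while the mock API used (NewSyncProducer, ExpectSendMessageAndSucceed, Close) is the one shown below:

package myapp

import (
	"testing"

	"github.com/Shopify/sarama"
	"github.com/Shopify/sarama/mocks"
)

// publishGreeting is a hypothetical function under test. Because it depends
// only on the sarama.SyncProducer interface, the mock can be injected in tests.
func publishGreeting(p sarama.SyncProducer) error {
	msg := &sarama.ProducerMessage{Topic: "greetings", Value: sarama.StringEncoder("hello")}
	_, _, err := p.SendMessage(msg)
	return err
}

func TestPublishGreeting(t *testing.T) {
	sp := mocks.NewSyncProducer(t, nil)
	sp.ExpectSendMessageAndSucceed()

	if err := publishGreeting(sp); err != nil {
		t.Error("Expected the mocked send to succeed, got:", err)
	}

	// Close reports an error to t if any expectations were left unconsumed.
	if err := sp.Close(); err != nil {
		t.Error(err)
	}
}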
-// ErrorReporter is a simple interface that includes the testing.T methods we use to report -// expectation violations when using the mock objects. -type ErrorReporter interface { - Errorf(string, ...interface{}) -} - -var ( - errProduceSuccess error = nil - errOutOfExpectations = errors.New("No more expectations set on mock") - errPartitionConsumerNotStarted = errors.New("The partition consumer was never started") -) - -const AnyOffset int64 = -1000 - -type producerExpectation struct { - Result error -} - -type consumerExpectation struct { - Err error - Msg *sarama.ConsumerMessage -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/sync_producer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/sync_producer.go deleted file mode 100644 index be59ecdb4..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/sync_producer.go +++ /dev/null @@ -1,93 +0,0 @@ -package mocks - -import ( - "github.com/Shopify/sarama" - "sync" -) - -// SyncProducer implements sarama's SyncProducer interface for testing purposes. -// Before you can use it, you have to set expectations on the mock SyncProducer -// to tell it how to handle calls to SendMessage, so you can easily test success -// and failure scenarios. -type SyncProducer struct { - l sync.Mutex - t ErrorReporter - expectations []*producerExpectation - lastOffset int64 -} - -// NewSyncProducer instantiates a new SyncProducer mock. The t argument should -// be the *testing.T instance of your test method. An error will be written to it if -// an expectation is violated. The config argument is currently unused, but is -// maintained to be compatible with the async Producer. -func NewSyncProducer(t ErrorReporter, config *sarama.Config) *SyncProducer { - return &SyncProducer{ - t: t, - expectations: make([]*producerExpectation, 0), - } -} - -//////////////////////////////////////////////// -// Implement SyncProducer interface -//////////////////////////////////////////////// - -// SendMessage corresponds with the SendMessage method of sarama's SyncProducer implementation. -// You have to set expectations on the mock producer before calling SendMessage, so it knows -// how to handle them. If there are no remaining expectations when SendMessage is called, -// the mock producer will write an error to the test state object. -func (sp *SyncProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) { - sp.l.Lock() - defer sp.l.Unlock() - - if len(sp.expectations) > 0 { - expectation := sp.expectations[0] - sp.expectations = sp.expectations[1:] - - if expectation.Result == errProduceSuccess { - sp.lastOffset++ - msg.Offset = sp.lastOffset - return 0, msg.Offset, nil - } else { - return -1, -1, expectation.Result - } - } else { - sp.t.Errorf("No more expectations set on this mock producer to handle the input message.") - return -1, -1, errOutOfExpectations - } -} - -// Close corresponds with the Close method of sarama's SyncProducer implementation. -// By closing a mock SyncProducer, you also tell it that no more SendMessage calls will follow, -// so it will write an error to the test state if there are any remaining expectations. -func (sp *SyncProducer) Close() error { - sp.l.Lock() - defer sp.l.Unlock() - - if len(sp.expectations) > 0 { - sp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(sp.expectations)) - } - - return nil -} - -//////////////////////////////////////////////// -// Setting expectations -//////////////////////////////////////////////// - -// ExpectSendMessageAndSucceed sets an expectation on the mock producer that SendMessage will be -// called. 
The mock producer will handle the message as if it produced successfully, i.e. by -// returning a valid partition and offset, and a nil error. -func (sp *SyncProducer) ExpectSendMessageAndSucceed() { - sp.l.Lock() - defer sp.l.Unlock() - sp.expectations = append(sp.expectations, &producerExpectation{Result: errProduceSuccess}) -} - -// ExpectSendMessageAndFail sets an expectation on the mock producer that SendMessage will be -// called. The mock producer will handle the message as if it failed to produce -// successfully, i.e. by returning the provided error. -func (sp *SyncProducer) ExpectSendMessageAndFail(err error) { - sp.l.Lock() - defer sp.l.Unlock() - sp.expectations = append(sp.expectations, &producerExpectation{Result: err}) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/sync_producer_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/sync_producer_test.go deleted file mode 100644 index a674138e9..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/mocks/sync_producer_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package mocks - -import ( - "testing" - - "github.com/Shopify/sarama" -) - -func TestMockSyncProducerImplementsSyncProducerInterface(t *testing.T) { - var mp interface{} = &SyncProducer{} - if _, ok := mp.(sarama.SyncProducer); !ok { - t.Error("The mock sync producer should implement the sarama.SyncProducer interface.") - } -} - -func TestSyncProducerReturnsExpectationsToSendMessage(t *testing.T) { - sp := NewSyncProducer(t, nil) - defer func() { - if err := sp.Close(); err != nil { - t.Error(err) - } - }() - - sp.ExpectSendMessageAndSucceed() - sp.ExpectSendMessageAndSucceed() - sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers) - - msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} - - _, offset, err := sp.SendMessage(msg) - if err != nil { - t.Errorf("The first message should have been produced successfully, but got %s", err) - } - if offset != 1 || offset != msg.Offset { - t.Errorf("The first message should have been assigned offset 1, but got %d", msg.Offset) - } - - _, offset, err = sp.SendMessage(msg) - if err != nil { - t.Errorf("The second message should have been produced successfully, but got %s", err) - } - if offset != 2 || offset != msg.Offset { - t.Errorf("The second message should have been assigned offset 2, but got %d", offset) - } - - _, _, err = sp.SendMessage(msg) - if err != sarama.ErrOutOfBrokers { - t.Errorf("The third message should not have been produced successfully") - } - - if err := sp.Close(); err != nil { - t.Error(err) - } -} - -func TestSyncProducerWithTooManyExpectations(t *testing.T) { - trm := newTestReporterMock() - - sp := NewSyncProducer(trm, nil) - sp.ExpectSendMessageAndSucceed() - sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers) - - msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} - if _, _, err := sp.SendMessage(msg); err != nil { - t.Error("No error expected on first SendMessage call", err) - } - - if err := sp.Close(); err != nil { - t.Error(err) - } - - if len(trm.errors) != 1 { - t.Error("Expected to report an error") - } -} - -func TestSyncProducerWithTooFewExpectations(t *testing.T) { - trm := newTestReporterMock() - - sp := NewSyncProducer(trm, nil) - sp.ExpectSendMessageAndSucceed() - - msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")} - if _, _, err := sp.SendMessage(msg); err != nil { - t.Error("No error expected on first SendMessage call", err) - } - if _, _, err := 
sp.SendMessage(msg); err != errOutOfExpectations { - t.Error("errOutOfExpectations expected on second SendMessage call, found:", err) - } - - if err := sp.Close(); err != nil { - t.Error(err) - } - - if len(trm.errors) != 1 { - t.Error("Expected to report an error") - } -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_request.go deleted file mode 100644 index ba4ac76aa..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_request.go +++ /dev/null @@ -1,172 +0,0 @@ -package sarama - -// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which -// tells the broker to set the timestamp to the time at which the request was received. -// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2. -const ReceiveTime int64 = -1 - -type offsetCommitRequestBlock struct { - offset int64 - timestamp int64 - metadata string -} - -func (r *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error { - pe.putInt64(r.offset) - if version == 1 { - pe.putInt64(r.timestamp) - } else if r.timestamp != 0 { - Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored") - } - - return pe.putString(r.metadata) -} - -func (r *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) { - if r.offset, err = pd.getInt64(); err != nil { - return err - } - if version == 1 { - if r.timestamp, err = pd.getInt64(); err != nil { - return err - } - } - r.metadata, err = pd.getString() - return err -} - -type OffsetCommitRequest struct { - ConsumerGroup string - ConsumerGroupGeneration int32 // v1 or later - ConsumerID string // v1 or later - RetentionTime int64 // v2 or later - - // Version can be: - // - 0 (kafka 0.8.1 and later) - // - 1 (kafka 0.8.2 and later) - // - 2 (kafka 0.8.3 and later) - Version int16 - blocks map[string]map[int32]*offsetCommitRequestBlock -} - -func (r *OffsetCommitRequest) encode(pe packetEncoder) error { - if r.Version < 0 || r.Version > 2 { - return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"} - } - - if err := pe.putString(r.ConsumerGroup); err != nil { - return err - } - - if r.Version >= 1 { - pe.putInt32(r.ConsumerGroupGeneration) - if err := pe.putString(r.ConsumerID); err != nil { - return err - } - } else { - if r.ConsumerGroupGeneration != 0 { - Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored") - } - if r.ConsumerID != "" { - Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored") - } - } - - if r.Version >= 2 { - pe.putInt64(r.RetentionTime) - } else if r.RetentionTime != 0 { - Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored") - } - - if err := pe.putArrayLength(len(r.blocks)); err != nil { - return err - } - for topic, partitions := range r.blocks { - if err := pe.putString(topic); err != nil { - return err - } - if err := pe.putArrayLength(len(partitions)); err != nil { - return err - } - for partition, block := range partitions { - pe.putInt32(partition) - if err := block.encode(pe, r.Version); err != nil { - return err - } - } - } - return nil -} - -func (r *OffsetCommitRequest) decode(pd packetDecoder) (err error) { - if r.ConsumerGroup, err = pd.getString(); err != nil { - return err - } - - if r.Version >= 1 { - if r.ConsumerGroupGeneration, 
err = pd.getInt32(); err != nil { - return err - } - if r.ConsumerID, err = pd.getString(); err != nil { - return err - } - } - - if r.Version >= 2 { - if r.RetentionTime, err = pd.getInt64(); err != nil { - return err - } - } - - topicCount, err := pd.getArrayLength() - if err != nil { - return err - } - if topicCount == 0 { - return nil - } - r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) - for i := 0; i < topicCount; i++ { - topic, err := pd.getString() - if err != nil { - return err - } - partitionCount, err := pd.getArrayLength() - if err != nil { - return err - } - r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock) - for j := 0; j < partitionCount; j++ { - partition, err := pd.getInt32() - if err != nil { - return err - } - block := &offsetCommitRequestBlock{} - if err := block.decode(pd, r.Version); err != nil { - return err - } - r.blocks[topic][partition] = block - } - } - return nil -} - -func (r *OffsetCommitRequest) key() int16 { - return 8 -} - -func (r *OffsetCommitRequest) version() int16 { - return r.Version -} - -func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) { - if r.blocks == nil { - r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) - } - - if r.blocks[topic] == nil { - r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock) - } - - r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata} -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_request_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_request_test.go deleted file mode 100644 index afc25b7b3..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_request_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package sarama - -import "testing" - -var ( - offsetCommitRequestNoBlocksV0 = []byte{ - 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', - 0x00, 0x00, 0x00, 0x00} - - offsetCommitRequestNoBlocksV1 = []byte{ - 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', - 0x00, 0x00, 0x11, 0x22, - 0x00, 0x04, 'c', 'o', 'n', 's', - 0x00, 0x00, 0x00, 0x00} - - offsetCommitRequestNoBlocksV2 = []byte{ - 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', - 0x00, 0x00, 0x11, 0x22, - 0x00, 0x04, 'c', 'o', 'n', 's', - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x33, - 0x00, 0x00, 0x00, 0x00} - - offsetCommitRequestOneBlockV0 = []byte{ - 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x05, 't', 'o', 'p', 'i', 'c', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x52, 0x21, - 0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF, - 0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'} - - offsetCommitRequestOneBlockV1 = []byte{ - 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', - 0x00, 0x00, 0x11, 0x22, - 0x00, 0x04, 'c', 'o', 'n', 's', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x05, 't', 'o', 'p', 'i', 'c', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x52, 0x21, - 0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'} - - offsetCommitRequestOneBlockV2 = []byte{ - 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', - 0x00, 0x00, 0x11, 0x22, - 0x00, 0x04, 'c', 'o', 'n', 's', - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x33, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x05, 't', 'o', 'p', 'i', 'c', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x52, 0x21, - 0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF, - 0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'} -) - -func 
TestOffsetCommitRequestV0(t *testing.T) { - request := new(OffsetCommitRequest) - request.Version = 0 - request.ConsumerGroup = "foobar" - testRequest(t, "no blocks v0", request, offsetCommitRequestNoBlocksV0) - - request.AddBlock("topic", 0x5221, 0xDEADBEEF, 0, "metadata") - testRequest(t, "one block v0", request, offsetCommitRequestOneBlockV0) -} - -func TestOffsetCommitRequestV1(t *testing.T) { - request := new(OffsetCommitRequest) - request.ConsumerGroup = "foobar" - request.ConsumerID = "cons" - request.ConsumerGroupGeneration = 0x1122 - request.Version = 1 - testRequest(t, "no blocks v1", request, offsetCommitRequestNoBlocksV1) - - request.AddBlock("topic", 0x5221, 0xDEADBEEF, ReceiveTime, "metadata") - testRequest(t, "one block v1", request, offsetCommitRequestOneBlockV1) -} - -func TestOffsetCommitRequestV2(t *testing.T) { - request := new(OffsetCommitRequest) - request.ConsumerGroup = "foobar" - request.ConsumerID = "cons" - request.ConsumerGroupGeneration = 0x1122 - request.RetentionTime = 0x4433 - request.Version = 2 - testRequest(t, "no blocks v2", request, offsetCommitRequestNoBlocksV2) - - request.AddBlock("topic", 0x5221, 0xDEADBEEF, 0, "metadata") - testRequest(t, "one block v2", request, offsetCommitRequestOneBlockV2) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_response.go deleted file mode 100644 index 573a3b6a1..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_response.go +++ /dev/null @@ -1,73 +0,0 @@ -package sarama - -type OffsetCommitResponse struct { - Errors map[string]map[int32]KError -} - -func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) { - if r.Errors == nil { - r.Errors = make(map[string]map[int32]KError) - } - partitions := r.Errors[topic] - if partitions == nil { - partitions = make(map[int32]KError) - r.Errors[topic] = partitions - } - partitions[partition] = kerror -} - -func (r *OffsetCommitResponse) encode(pe packetEncoder) error { - if err := pe.putArrayLength(len(r.Errors)); err != nil { - return err - } - for topic, partitions := range r.Errors { - if err := pe.putString(topic); err != nil { - return err - } - if err := pe.putArrayLength(len(partitions)); err != nil { - return err - } - for partition, kerror := range partitions { - pe.putInt32(partition) - pe.putInt16(int16(kerror)) - } - } - return nil -} - -func (r *OffsetCommitResponse) decode(pd packetDecoder) (err error) { - numTopics, err := pd.getArrayLength() - if err != nil || numTopics == 0 { - return err - } - - r.Errors = make(map[string]map[int32]KError, numTopics) - for i := 0; i < numTopics; i++ { - name, err := pd.getString() - if err != nil { - return err - } - - numErrors, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Errors[name] = make(map[int32]KError, numErrors) - - for j := 0; j < numErrors; j++ { - id, err := pd.getInt32() - if err != nil { - return err - } - - tmp, err := pd.getInt16() - if err != nil { - return err - } - r.Errors[name][id] = KError(tmp) - } - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_response_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_response_test.go deleted file mode 100644 index 074ec9232..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_commit_response_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package sarama - -import ( - "testing" -) - -var ( - 
emptyOffsetCommitResponse = []byte{ - 0x00, 0x00, 0x00, 0x00} -) - -func TestEmptyOffsetCommitResponse(t *testing.T) { - response := OffsetCommitResponse{} - testResponse(t, "empty", &response, emptyOffsetCommitResponse) -} - -func TestNormalOffsetCommitResponse(t *testing.T) { - response := OffsetCommitResponse{} - response.AddError("t", 0, ErrNotLeaderForPartition) - response.Errors["m"] = make(map[int32]KError) - // The response encoded form cannot be checked for it varies due to - // unpredictable map traversal order. - testResponse(t, "normal", &response, nil) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_request.go deleted file mode 100644 index 30bbbbbd0..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_request.go +++ /dev/null @@ -1,71 +0,0 @@ -package sarama - -type OffsetFetchRequest struct { - ConsumerGroup string - Version int16 - partitions map[string][]int32 -} - -func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) { - if r.Version < 0 || r.Version > 1 { - return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"} - } - - if err = pe.putString(r.ConsumerGroup); err != nil { - return err - } - if err = pe.putArrayLength(len(r.partitions)); err != nil { - return err - } - for topic, partitions := range r.partitions { - if err = pe.putString(topic); err != nil { - return err - } - if err = pe.putInt32Array(partitions); err != nil { - return err - } - } - return nil -} - -func (r *OffsetFetchRequest) decode(pd packetDecoder) (err error) { - if r.ConsumerGroup, err = pd.getString(); err != nil { - return err - } - partitionCount, err := pd.getArrayLength() - if err != nil { - return err - } - if partitionCount == 0 { - return nil - } - r.partitions = make(map[string][]int32) - for i := 0; i < partitionCount; i++ { - topic, err := pd.getString() - if err != nil { - return err - } - partitions, err := pd.getInt32Array() - if err != nil { - return err - } - r.partitions[topic] = partitions - } - return nil -} - -func (r *OffsetFetchRequest) key() int16 { - return 9 -} - -func (r *OffsetFetchRequest) version() int16 { - return r.Version -} - -func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) { - if r.partitions == nil { - r.partitions = make(map[string][]int32) - } - - r.partitions[topic] = append(r.partitions[topic], partitionID) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_request_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_request_test.go deleted file mode 100644 index 025d725c9..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_request_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package sarama - -import "testing" - -var ( - offsetFetchRequestNoGroupNoPartitions = []byte{ - 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00} - - offsetFetchRequestNoPartitions = []byte{ - 0x00, 0x04, 'b', 'l', 'a', 'h', - 0x00, 0x00, 0x00, 0x00} - - offsetFetchRequestOnePartition = []byte{ - 0x00, 0x04, 'b', 'l', 'a', 'h', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x0D, 't', 'o', 'p', 'i', 'c', 'T', 'h', 'e', 'F', 'i', 'r', 's', 't', - 0x00, 0x00, 0x00, 0x01, - 0x4F, 0x4F, 0x4F, 0x4F} -) - -func TestOffsetFetchRequest(t *testing.T) { - request := new(OffsetFetchRequest) - testRequest(t, "no group, no partitions", request, offsetFetchRequestNoGroupNoPartitions) - - request.ConsumerGroup = "blah" - testRequest(t, "no partitions", request, 
offsetFetchRequestNoPartitions) - - request.AddPartition("topicTheFirst", 0x4F4F4F4F) - testRequest(t, "one partition", request, offsetFetchRequestOnePartition) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_response.go deleted file mode 100644 index 93078c350..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_response.go +++ /dev/null @@ -1,131 +0,0 @@ -package sarama - -type OffsetFetchResponseBlock struct { - Offset int64 - Metadata string - Err KError -} - -func (r *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) { - r.Offset, err = pd.getInt64() - if err != nil { - return err - } - - r.Metadata, err = pd.getString() - if err != nil { - return err - } - - tmp, err := pd.getInt16() - if err != nil { - return err - } - r.Err = KError(tmp) - - return nil -} - -func (r *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) { - pe.putInt64(r.Offset) - - err = pe.putString(r.Metadata) - if err != nil { - return err - } - - pe.putInt16(int16(r.Err)) - - return nil -} - -type OffsetFetchResponse struct { - Blocks map[string]map[int32]*OffsetFetchResponseBlock -} - -func (r *OffsetFetchResponse) encode(pe packetEncoder) error { - if err := pe.putArrayLength(len(r.Blocks)); err != nil { - return err - } - for topic, partitions := range r.Blocks { - if err := pe.putString(topic); err != nil { - return err - } - if err := pe.putArrayLength(len(partitions)); err != nil { - return err - } - for partition, block := range partitions { - pe.putInt32(partition) - if err := block.encode(pe); err != nil { - return err - } - } - } - return nil -} - -func (r *OffsetFetchResponse) decode(pd packetDecoder) (err error) { - numTopics, err := pd.getArrayLength() - if err != nil || numTopics == 0 { - return err - } - - r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics) - for i := 0; i < numTopics; i++ { - name, err := pd.getString() - if err != nil { - return err - } - - numBlocks, err := pd.getArrayLength() - if err != nil { - return err - } - - if numBlocks == 0 { - r.Blocks[name] = nil - continue - } - r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks) - - for j := 0; j < numBlocks; j++ { - id, err := pd.getInt32() - if err != nil { - return err - } - - block := new(OffsetFetchResponseBlock) - err = block.decode(pd) - if err != nil { - return err - } - r.Blocks[name][id] = block - } - } - - return nil -} - -func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock { - if r.Blocks == nil { - return nil - } - - if r.Blocks[topic] == nil { - return nil - } - - return r.Blocks[topic][partition] -} - -func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) { - if r.Blocks == nil { - r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock) - } - partitions := r.Blocks[topic] - if partitions == nil { - partitions = make(map[int32]*OffsetFetchResponseBlock) - r.Blocks[topic] = partitions - } - partitions[partition] = block -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_response_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_response_test.go deleted file mode 100644 index 7614ae424..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_fetch_response_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package sarama - -import "testing" - -var ( - emptyOffsetFetchResponse = 
[]byte{ - 0x00, 0x00, 0x00, 0x00} -) - -func TestEmptyOffsetFetchResponse(t *testing.T) { - response := OffsetFetchResponse{} - testResponse(t, "empty", &response, emptyOffsetFetchResponse) -} - -func TestNormalOffsetFetchResponse(t *testing.T) { - response := OffsetFetchResponse{} - response.AddBlock("t", 0, &OffsetFetchResponseBlock{0, "md", ErrRequestTimedOut}) - response.Blocks["m"] = nil - // The response encoded form cannot be checked for it varies due to - // unpredictable map traversal order. - testResponse(t, "normal", &response, nil) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_request.go deleted file mode 100644 index 842d5c0f8..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_request.go +++ /dev/null @@ -1,113 +0,0 @@ -package sarama - -type offsetRequestBlock struct { - time int64 - maxOffsets int32 -} - -func (r *offsetRequestBlock) encode(pe packetEncoder) error { - pe.putInt64(int64(r.time)) - pe.putInt32(r.maxOffsets) - return nil -} - -func (r *offsetRequestBlock) decode(pd packetDecoder) (err error) { - if r.time, err = pd.getInt64(); err != nil { - return err - } - if r.maxOffsets, err = pd.getInt32(); err != nil { - return err - } - return nil -} - -type OffsetRequest struct { - blocks map[string]map[int32]*offsetRequestBlock -} - -func (r *OffsetRequest) encode(pe packetEncoder) error { - pe.putInt32(-1) // replica ID is always -1 for clients - err := pe.putArrayLength(len(r.blocks)) - if err != nil { - return err - } - for topic, partitions := range r.blocks { - err = pe.putString(topic) - if err != nil { - return err - } - err = pe.putArrayLength(len(partitions)) - if err != nil { - return err - } - for partition, block := range partitions { - pe.putInt32(partition) - if err = block.encode(pe); err != nil { - return err - } - } - } - return nil -} - -func (r *OffsetRequest) decode(pd packetDecoder) error { - // Ignore replica ID - if _, err := pd.getInt32(); err != nil { - return err - } - blockCount, err := pd.getArrayLength() - if err != nil { - return err - } - if blockCount == 0 { - return nil - } - r.blocks = make(map[string]map[int32]*offsetRequestBlock) - for i := 0; i < blockCount; i++ { - topic, err := pd.getString() - if err != nil { - return err - } - partitionCount, err := pd.getArrayLength() - if err != nil { - return err - } - r.blocks[topic] = make(map[int32]*offsetRequestBlock) - for j := 0; j < partitionCount; j++ { - partition, err := pd.getInt32() - if err != nil { - return err - } - block := &offsetRequestBlock{} - if err := block.decode(pd); err != nil { - return err - } - r.blocks[topic][partition] = block - } - } - return nil -} - -func (r *OffsetRequest) key() int16 { - return 2 -} - -func (r *OffsetRequest) version() int16 { - return 0 -} - -func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) { - if r.blocks == nil { - r.blocks = make(map[string]map[int32]*offsetRequestBlock) - } - - if r.blocks[topic] == nil { - r.blocks[topic] = make(map[int32]*offsetRequestBlock) - } - - tmp := new(offsetRequestBlock) - tmp.time = time - tmp.maxOffsets = maxOffsets - - r.blocks[topic][partitionID] = tmp -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_request_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_request_test.go deleted file mode 100644 index f3b3046bb..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_request_test.go +++ 
/dev/null @@ -1,26 +0,0 @@ -package sarama - -import "testing" - -var ( - offsetRequestNoBlocks = []byte{ - 0xFF, 0xFF, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0x00} - - offsetRequestOneBlock = []byte{ - 0xFF, 0xFF, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x03, 'f', 'o', 'o', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x04, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x02} -) - -func TestOffsetRequest(t *testing.T) { - request := new(OffsetRequest) - testRequest(t, "no blocks", request, offsetRequestNoBlocks) - - request.AddBlock("foo", 4, 1, 2) - testRequest(t, "one block", request, offsetRequestOneBlock) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_response.go deleted file mode 100644 index 07d71ca72..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_response.go +++ /dev/null @@ -1,130 +0,0 @@ -package sarama - -type OffsetResponseBlock struct { - Err KError - Offsets []int64 -} - -func (r *OffsetResponseBlock) decode(pd packetDecoder) (err error) { - tmp, err := pd.getInt16() - if err != nil { - return err - } - r.Err = KError(tmp) - - r.Offsets, err = pd.getInt64Array() - - return err -} - -func (r *OffsetResponseBlock) encode(pe packetEncoder) (err error) { - pe.putInt16(int16(r.Err)) - - return pe.putInt64Array(r.Offsets) -} - -type OffsetResponse struct { - Blocks map[string]map[int32]*OffsetResponseBlock -} - -func (r *OffsetResponse) decode(pd packetDecoder) (err error) { - numTopics, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics) - for i := 0; i < numTopics; i++ { - name, err := pd.getString() - if err != nil { - return err - } - - numBlocks, err := pd.getArrayLength() - if err != nil { - return err - } - - r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks) - - for j := 0; j < numBlocks; j++ { - id, err := pd.getInt32() - if err != nil { - return err - } - - block := new(OffsetResponseBlock) - err = block.decode(pd) - if err != nil { - return err - } - r.Blocks[name][id] = block - } - } - - return nil -} - -func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock { - if r.Blocks == nil { - return nil - } - - if r.Blocks[topic] == nil { - return nil - } - - return r.Blocks[topic][partition] -} - -/* -// [0 0 0 1 ntopics -0 8 109 121 95 116 111 112 105 99 topic -0 0 0 1 npartitions -0 0 0 0 id -0 0 - -0 0 0 1 0 0 0 0 -0 1 1 1 0 0 0 1 -0 8 109 121 95 116 111 112 -105 99 0 0 0 1 0 0 -0 0 0 0 0 0 0 1 -0 0 0 0 0 1 1 1] - -*/ -func (r *OffsetResponse) encode(pe packetEncoder) (err error) { - if err = pe.putArrayLength(len(r.Blocks)); err != nil { - return err - } - - for topic, partitions := range r.Blocks { - if err = pe.putString(topic); err != nil { - return err - } - if err = pe.putArrayLength(len(partitions)); err != nil { - return err - } - for partition, block := range partitions { - pe.putInt32(partition) - if err = block.encode(pe); err != nil { - return err - } - } - } - - return nil -} - -// testing API - -func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) { - if r.Blocks == nil { - r.Blocks = make(map[string]map[int32]*OffsetResponseBlock) - } - byTopic, ok := r.Blocks[topic] - if !ok { - byTopic = make(map[int32]*OffsetResponseBlock) - r.Blocks[topic] = byTopic - } - byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}} -} diff --git 
a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_response_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/offset_response_test.go deleted file mode 100644 index a427cbd20..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/offset_response_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package sarama - -import "testing" - -var ( - emptyOffsetResponse = []byte{ - 0x00, 0x00, 0x00, 0x00} - - normalOffsetResponse = []byte{ - 0x00, 0x00, 0x00, 0x02, - - 0x00, 0x01, 'a', - 0x00, 0x00, 0x00, 0x00, - - 0x00, 0x01, 'z', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x02, - 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06} -) - -func TestEmptyOffsetResponse(t *testing.T) { - response := OffsetResponse{} - - testDecodable(t, "empty", &response, emptyOffsetResponse) - if len(response.Blocks) != 0 { - t.Error("Decoding produced", len(response.Blocks), "topics where there were none.") - } -} - -func TestNormalOffsetResponse(t *testing.T) { - response := OffsetResponse{} - - testDecodable(t, "normal", &response, normalOffsetResponse) - - if len(response.Blocks) != 2 { - t.Fatal("Decoding produced", len(response.Blocks), "topics where there were two.") - } - - if len(response.Blocks["a"]) != 0 { - t.Fatal("Decoding produced", len(response.Blocks["a"]), "partitions for topic 'a' where there were none.") - } - - if len(response.Blocks["z"]) != 1 { - t.Fatal("Decoding produced", len(response.Blocks["z"]), "partitions for topic 'z' where there was one.") - } - - if response.Blocks["z"][2].Err != ErrNoError { - t.Fatal("Decoding produced invalid error for topic z partition 2.") - } - - if len(response.Blocks["z"][2].Offsets) != 2 { - t.Fatal("Decoding produced invalid number of offsets for topic z partition 2.") - } - - if response.Blocks["z"][2].Offsets[0] != 5 || response.Blocks["z"][2].Offsets[1] != 6 { - t.Fatal("Decoding produced invalid offsets for topic z partition 2.") - } - -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/packet_decoder.go b/Godeps/_workspace/src/github.com/Shopify/sarama/packet_decoder.go deleted file mode 100644 index 034222313..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/packet_decoder.go +++ /dev/null @@ -1,44 +0,0 @@ -package sarama - -// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules. -// Types implementing Decoder only need to worry about calling methods like GetString, -// not about how a string is represented in Kafka. -type packetDecoder interface { - // Primitives - getInt8() (int8, error) - getInt16() (int16, error) - getInt32() (int32, error) - getInt64() (int64, error) - getArrayLength() (int, error) - - // Collections - getBytes() ([]byte, error) - getString() (string, error) - getInt32Array() ([]int32, error) - getInt64Array() ([]int64, error) - - // Subsets - remaining() int - getSubset(length int) (packetDecoder, error) - - // Stacks, see PushDecoder - push(in pushDecoder) error - pop() error -} - -// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity -// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where -// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they -// depend upon have been decoded. -type pushDecoder interface { - // Saves the offset into the input buffer as the location to actually read the calculated value when able. 
- saveOffset(in int) - - // Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32). - reserveLength() int - - // Indicates that all required data is now available to calculate and check the field. - // SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes - // of data from the saved offset, and verify it based on the data between the saved offset and curOffset. - check(curOffset int, buf []byte) error -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/packet_encoder.go b/Godeps/_workspace/src/github.com/Shopify/sarama/packet_encoder.go deleted file mode 100644 index 2c5710938..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/packet_encoder.go +++ /dev/null @@ -1,41 +0,0 @@ -package sarama - -// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules. -// Types implementing Encoder only need to worry about calling methods like PutString, -// not about how a string is represented in Kafka. -type packetEncoder interface { - // Primitives - putInt8(in int8) - putInt16(in int16) - putInt32(in int32) - putInt64(in int64) - putArrayLength(in int) error - - // Collections - putBytes(in []byte) error - putRawBytes(in []byte) error - putString(in string) error - putInt32Array(in []int32) error - putInt64Array(in []int64) error - - // Stacks, see PushEncoder - push(in pushEncoder) - pop() error -} - -// PushEncoder is the interface for encoding fields like CRCs and lengths where the value -// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where -// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they -// depend upon have been written. -type pushEncoder interface { - // Saves the offset into the input buffer as the location to actually write the calculated value when able. - saveOffset(in int) - - // Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32). - reserveLength() int - - // Indicates that all required data is now available to calculate and write the field. - // SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes - // of data to the saved offset, based on the data between the saved offset and curOffset. - run(curOffset int, buf []byte) error -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/partitioner.go b/Godeps/_workspace/src/github.com/Shopify/sarama/partitioner.go deleted file mode 100644 index 493ee0ec3..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/partitioner.go +++ /dev/null @@ -1,120 +0,0 @@ -package sarama - -import ( - "hash" - "hash/fnv" - "math/rand" - "time" -) - -// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1], -// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided -// as simple default implementations. -type Partitioner interface { - Partition(message *ProducerMessage, numPartitions int32) (int32, error) // Partition takes a message and partition count and chooses a partition - - // RequiresConsistency indicates to the user of the partitioner whether the mapping of key->partition is consistent or not. 
- // Specifically, if a partitioner requires consistency then it must be allowed to choose from all partitions (even ones known to - // be unavailable), and its choice must be respected by the caller. The obvious example is the HashPartitioner. - RequiresConsistency() bool -} - -// PartitionerConstructor is the type for a function capable of constructing new Partitioners. -type PartitionerConstructor func(topic string) Partitioner - -type manualPartitioner struct{} - -// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided -// ProducerMessage's Partition field as the partition to produce to. -func NewManualPartitioner(topic string) Partitioner { - return new(manualPartitioner) -} - -func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { - return message.Partition, nil -} - -func (p *manualPartitioner) RequiresConsistency() bool { - return true -} - -type randomPartitioner struct { - generator *rand.Rand -} - -// NewRandomPartitioner returns a Partitioner which chooses a random partition each time. -func NewRandomPartitioner(topic string) Partitioner { - p := new(randomPartitioner) - p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano())) - return p -} - -func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { - return int32(p.generator.Intn(int(numPartitions))), nil -} - -func (p *randomPartitioner) RequiresConsistency() bool { - return false -} - -type roundRobinPartitioner struct { - partition int32 -} - -// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time. -func NewRoundRobinPartitioner(topic string) Partitioner { - return &roundRobinPartitioner{} -} - -func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { - if p.partition >= numPartitions { - p.partition = 0 - } - ret := p.partition - p.partition++ - return ret, nil -} - -func (p *roundRobinPartitioner) RequiresConsistency() bool { - return false -} - -type hashPartitioner struct { - random Partitioner - hasher hash.Hash32 -} - -// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil, or fails to -// encode, then a random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key -// is used, modulus the number of partitions. This ensures that messages with the same key always end up on the -// same partition. 
-func NewHashPartitioner(topic string) Partitioner { - p := new(hashPartitioner) - p.random = NewRandomPartitioner(topic) - p.hasher = fnv.New32a() - return p -} - -func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { - if message.Key == nil { - return p.random.Partition(message, numPartitions) - } - bytes, err := message.Key.Encode() - if err != nil { - return -1, err - } - p.hasher.Reset() - _, err = p.hasher.Write(bytes) - if err != nil { - return -1, err - } - hash := int32(p.hasher.Sum32()) - if hash < 0 { - hash = -hash - } - return hash % numPartitions, nil -} - -func (p *hashPartitioner) RequiresConsistency() bool { - return true -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/partitioner_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/partitioner_test.go deleted file mode 100644 index f44c509d6..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/partitioner_test.go +++ /dev/null @@ -1,198 +0,0 @@ -package sarama - -import ( - "crypto/rand" - "log" - "testing" -) - -func assertPartitioningConsistent(t *testing.T, partitioner Partitioner, message *ProducerMessage, numPartitions int32) { - choice, err := partitioner.Partition(message, numPartitions) - if err != nil { - t.Error(partitioner, err) - } - if choice < 0 || choice >= numPartitions { - t.Error(partitioner, "returned partition", choice, "outside of range for", message) - } - for i := 1; i < 50; i++ { - newChoice, err := partitioner.Partition(message, numPartitions) - if err != nil { - t.Error(partitioner, err) - } - if newChoice != choice { - t.Error(partitioner, "returned partition", newChoice, "inconsistent with", choice, ".") - } - } -} - -func TestRandomPartitioner(t *testing.T) { - partitioner := NewRandomPartitioner("mytopic") - - choice, err := partitioner.Partition(nil, 1) - if err != nil { - t.Error(partitioner, err) - } - if choice != 0 { - t.Error("Returned non-zero partition when only one available.") - } - - for i := 1; i < 50; i++ { - choice, err := partitioner.Partition(nil, 50) - if err != nil { - t.Error(partitioner, err) - } - if choice < 0 || choice >= 50 { - t.Error("Returned partition", choice, "outside of range.") - } - } -} - -func TestRoundRobinPartitioner(t *testing.T) { - partitioner := NewRoundRobinPartitioner("mytopic") - - choice, err := partitioner.Partition(nil, 1) - if err != nil { - t.Error(partitioner, err) - } - if choice != 0 { - t.Error("Returned non-zero partition when only one available.") - } - - var i int32 - for i = 1; i < 50; i++ { - choice, err := partitioner.Partition(nil, 7) - if err != nil { - t.Error(partitioner, err) - } - if choice != i%7 { - t.Error("Returned partition", choice, "expecting", i%7) - } - } -} - -func TestHashPartitioner(t *testing.T) { - partitioner := NewHashPartitioner("mytopic") - - choice, err := partitioner.Partition(&ProducerMessage{}, 1) - if err != nil { - t.Error(partitioner, err) - } - if choice != 0 { - t.Error("Returned non-zero partition when only one available.") - } - - for i := 1; i < 50; i++ { - choice, err := partitioner.Partition(&ProducerMessage{}, 50) - if err != nil { - t.Error(partitioner, err) - } - if choice < 0 || choice >= 50 { - t.Error("Returned partition", choice, "outside of range for nil key.") - } - } - - buf := make([]byte, 256) - for i := 1; i < 50; i++ { - if _, err := rand.Read(buf); err != nil { - t.Error(err) - } - assertPartitioningConsistent(t, partitioner, &ProducerMessage{Key: ByteEncoder(buf)}, 50) - } -} - -func 
TestManualPartitioner(t *testing.T) { - partitioner := NewManualPartitioner("mytopic") - - choice, err := partitioner.Partition(&ProducerMessage{}, 1) - if err != nil { - t.Error(partitioner, err) - } - if choice != 0 { - t.Error("Returned non-zero partition when only one available.") - } - - for i := int32(1); i < 50; i++ { - choice, err := partitioner.Partition(&ProducerMessage{Partition: i}, 50) - if err != nil { - t.Error(partitioner, err) - } - if choice != i { - t.Error("Returned partition not the same as the input partition") - } - } -} - -// By default, Sarama uses the message's key to consistently assign a partition to -// a message using hashing. If no key is set, a random partition will be chosen. -// This example shows how you can partition messages randomly, even when a key is set, -// by overriding Config.Producer.Partitioner. -func ExamplePartitioner_random() { - config := NewConfig() - config.Producer.Partitioner = NewRandomPartitioner - - producer, err := NewSyncProducer([]string{"localhost:9092"}, config) - if err != nil { - log.Fatal(err) - } - defer func() { - if err := producer.Close(); err != nil { - log.Println("Failed to close producer:", err) - } - }() - - msg := &ProducerMessage{Topic: "test", Key: StringEncoder("key is set"), Value: StringEncoder("test")} - partition, offset, err := producer.SendMessage(msg) - if err != nil { - log.Fatalln("Failed to produce message to Kafka cluster:", err) - } - - log.Printf("Produced message to partition %d with offset %d", partition, offset) -} - -// This example shows how to assign partitions to your messages manually. -func ExamplePartitioner_manual() { - config := NewConfig() - - // First, we tell the producer that we are going to partition ourselves. - config.Producer.Partitioner = NewManualPartitioner - - producer, err := NewSyncProducer([]string{"localhost:9092"}, config) - if err != nil { - log.Fatal(err) - } - defer func() { - if err := producer.Close(); err != nil { - log.Println("Failed to close producer:", err) - } - }() - - // Now, we set the Partition field of the ProducerMessage struct. - msg := &ProducerMessage{Topic: "test", Partition: 6, Value: StringEncoder("test")} - - partition, offset, err := producer.SendMessage(msg) - if err != nil { - log.Fatalln("Failed to produce message to Kafka cluster:", err) - } - - if partition != 6 { - log.Fatal("Message should have been produced to partition 6!") - } - - log.Printf("Produced message to partition %d with offset %d", partition, offset) -} - -// This example shows how to set a different partitioner depending on the topic. -func ExamplePartitioner_per_topic() { - config := NewConfig() - config.Producer.Partitioner = func(topic string) Partitioner { - switch topic { - case "access_log", "error_log": - return NewRandomPartitioner(topic) - - default: - return NewHashPartitioner(topic) - } - } - - // ... -}
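As a standalone illustration of the consistent key-to-partition mapping implemented by hashPartitioner above, this editorial sketch reproduces the same FNV-1a-modulo computation using only the standard library; the same key always maps to the same partition:

package main

import (
	"fmt"
	"hash/fnv"
)

// partitionForKey mirrors the hashPartitioner logic shown above: hash the key
// bytes with FNV-1a, clamp a negative result, and reduce modulo the partition count.
func partitionForKey(key []byte, numPartitions int32) int32 {
	hasher := fnv.New32a()
	hasher.Write(key) // the FNV hash's Write never returns an error
	hash := int32(hasher.Sum32())
	if hash < 0 {
		hash = -hash
	}
	return hash % numPartitions
}

func main() {
	// Repeated calls with the same key yield the same partition.
	fmt.Println(partitionForKey([]byte("user-42"), 8))
	fmt.Println(partitionForKey([]byte("user-42"), 8))
}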
diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/prep_encoder.go b/Godeps/_workspace/src/github.com/Shopify/sarama/prep_encoder.go deleted file mode 100644 index ddeef780e..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/prep_encoder.go +++ /dev/null @@ -1,95 +0,0 @@ -package sarama - -import ( - "fmt" - "math" -) - -type prepEncoder struct { - length int -} - -// primitives - -func (pe *prepEncoder) putInt8(in int8) { - pe.length += 1 -} - -func (pe *prepEncoder) putInt16(in int16) { - pe.length += 2 -} - -func (pe *prepEncoder) putInt32(in int32) { - pe.length += 4 -} - -func (pe *prepEncoder) putInt64(in int64) { - pe.length += 8 -} - -func (pe *prepEncoder) putArrayLength(in int) error { - if in > math.MaxInt32 { - return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)} - } - pe.length += 4 - return nil -} - -// arrays - -func (pe *prepEncoder) putBytes(in []byte) error { - pe.length += 4 - if in == nil { - return nil - } - if len(in) > math.MaxInt32 { - return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))} - } - pe.length += len(in) - return nil -} - -func (pe *prepEncoder) putRawBytes(in []byte) error { - if len(in) > math.MaxInt32 { - return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))} - } - pe.length += len(in) - return nil -} - -func (pe *prepEncoder) putString(in string) error { - pe.length += 2 - if len(in) > math.MaxInt16 { - return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))} - } - pe.length += len(in) - return nil -} - -func (pe *prepEncoder) putInt32Array(in []int32) error { - err := pe.putArrayLength(len(in)) - if err != nil { - return err - } - pe.length += 4 * len(in) - return nil -} - -func (pe *prepEncoder) putInt64Array(in []int64) error { - err := pe.putArrayLength(len(in)) - if err != nil { - return err - } - pe.length += 8 * len(in) - return nil -} - -// stackable - -func (pe *prepEncoder) push(in pushEncoder) { - pe.length += in.reserveLength() -} - -func (pe *prepEncoder) pop() error { - return nil -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/produce_request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/produce_request.go deleted file mode 100644 index f21956137..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/produce_request.go +++ /dev/null @@ -1,148 +0,0 @@ -package sarama - -// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements -// it must see before responding. Any of the constants defined here are valid. On broker versions -// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many -// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced -// by setting the `min.insync.replicas` value in the broker's configuration). -type RequiredAcks int16 - -const ( - // NoResponse doesn't send any response, the TCP ACK is all you get. - NoResponse RequiredAcks = 0 - // WaitForLocal waits for only the local commit to succeed before responding. - WaitForLocal RequiredAcks = 1 - // WaitForAll waits for all replicas to commit before responding. 
- WaitForAll RequiredAcks = -1 -) - -type ProduceRequest struct { - RequiredAcks RequiredAcks - Timeout int32 - msgSets map[string]map[int32]*MessageSet -} - -func (p *ProduceRequest) encode(pe packetEncoder) error { - pe.putInt16(int16(p.RequiredAcks)) - pe.putInt32(p.Timeout) - err := pe.putArrayLength(len(p.msgSets)) - if err != nil { - return err - } - for topic, partitions := range p.msgSets { - err = pe.putString(topic) - if err != nil { - return err - } - err = pe.putArrayLength(len(partitions)) - if err != nil { - return err - } - for id, msgSet := range partitions { - pe.putInt32(id) - pe.push(&lengthField{}) - err = msgSet.encode(pe) - if err != nil { - return err - } - err = pe.pop() - if err != nil { - return err - } - } - } - return nil -} - -func (p *ProduceRequest) decode(pd packetDecoder) error { - requiredAcks, err := pd.getInt16() - if err != nil { - return err - } - p.RequiredAcks = RequiredAcks(requiredAcks) - if p.Timeout, err = pd.getInt32(); err != nil { - return err - } - topicCount, err := pd.getArrayLength() - if err != nil { - return err - } - if topicCount == 0 { - return nil - } - p.msgSets = make(map[string]map[int32]*MessageSet) - for i := 0; i < topicCount; i++ { - topic, err := pd.getString() - if err != nil { - return err - } - partitionCount, err := pd.getArrayLength() - if err != nil { - return err - } - p.msgSets[topic] = make(map[int32]*MessageSet) - for j := 0; j < partitionCount; j++ { - partition, err := pd.getInt32() - if err != nil { - return err - } - messageSetSize, err := pd.getInt32() - if err != nil { - return err - } - if messageSetSize == 0 { - continue - } - msgSetDecoder, err := pd.getSubset(int(messageSetSize)) - if err != nil { - return err - } - msgSet := &MessageSet{} - err = msgSet.decode(msgSetDecoder) - if err != nil { - return err - } - p.msgSets[topic][partition] = msgSet - } - } - return nil -} - -func (p *ProduceRequest) key() int16 { - return 0 -} - -func (p *ProduceRequest) version() int16 { - return 0 -} - -func (p *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) { - if p.msgSets == nil { - p.msgSets = make(map[string]map[int32]*MessageSet) - } - - if p.msgSets[topic] == nil { - p.msgSets[topic] = make(map[int32]*MessageSet) - } - - set := p.msgSets[topic][partition] - - if set == nil { - set = new(MessageSet) - p.msgSets[topic][partition] = set - } - - set.addMessage(msg) -} - -func (p *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) { - if p.msgSets == nil { - p.msgSets = make(map[string]map[int32]*MessageSet) - } - - if p.msgSets[topic] == nil { - p.msgSets[topic] = make(map[int32]*MessageSet) - } - - p.msgSets[topic][partition] = set -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/produce_request_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/produce_request_test.go deleted file mode 100644 index 21f4ba5b1..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/produce_request_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package sarama - -import ( - "testing" -) - -var ( - produceRequestEmpty = []byte{ - 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00} - - produceRequestHeader = []byte{ - 0x01, 0x23, - 0x00, 0x00, 0x04, 0x44, - 0x00, 0x00, 0x00, 0x00} - - produceRequestOneMessage = []byte{ - 0x01, 0x23, - 0x00, 0x00, 0x04, 0x44, - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x05, 't', 'o', 'p', 'i', 'c', - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0xAD, - 0x00, 0x00, 0x00, 0x1C, - // messageSet - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, - 0x00, 0x00, 0x00, 0x10, - // message - 0x23, 0x96, 0x4a, 0xf7, // CRC - 0x00, - 0x00, - 0xFF, 0xFF, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0x02, 0x00, 0xEE} -) - -func TestProduceRequest(t *testing.T) { - request := new(ProduceRequest) - testRequest(t, "empty", request, produceRequestEmpty) - - request.RequiredAcks = 0x123 - request.Timeout = 0x444 - testRequest(t, "header", request, produceRequestHeader) - - request.AddMessage("topic", 0xAD, &Message{Codec: CompressionNone, Key: nil, Value: []byte{0x00, 0xEE}}) - testRequest(t, "one message", request, produceRequestOneMessage) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/produce_response.go b/Godeps/_workspace/src/github.com/Shopify/sarama/produce_response.go deleted file mode 100644 index 1f49a8560..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/produce_response.go +++ /dev/null @@ -1,112 +0,0 @@ -package sarama - -type ProduceResponseBlock struct { - Err KError - Offset int64 -} - -func (pr *ProduceResponseBlock) decode(pd packetDecoder) (err error) { - tmp, err := pd.getInt16() - if err != nil { - return err - } - pr.Err = KError(tmp) - - pr.Offset, err = pd.getInt64() - if err != nil { - return err - } - - return nil -} - -type ProduceResponse struct { - Blocks map[string]map[int32]*ProduceResponseBlock -} - -func (pr *ProduceResponse) decode(pd packetDecoder) (err error) { - numTopics, err := pd.getArrayLength() - if err != nil { - return err - } - - pr.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics) - for i := 0; i < numTopics; i++ { - name, err := pd.getString() - if err != nil { - return err - } - - numBlocks, err := pd.getArrayLength() - if err != nil { - return err - } - - pr.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks) - - for j := 0; j < numBlocks; j++ { - id, err := pd.getInt32() - if err != nil { - return err - } - - block := new(ProduceResponseBlock) - err = block.decode(pd) - if err != nil { - return err - } - pr.Blocks[name][id] = block - } - } - - return nil -} - -func (pr *ProduceResponse) encode(pe packetEncoder) error { - err := pe.putArrayLength(len(pr.Blocks)) - if err != nil { - return err - } - for topic, partitions := range pr.Blocks { - err = pe.putString(topic) - if err != nil { - return err - } - err = pe.putArrayLength(len(partitions)) - if err != nil { - return err - } - for id, prb := range partitions { - pe.putInt32(id) - pe.putInt16(int16(prb.Err)) - pe.putInt64(prb.Offset) - } - } - return nil -} - -func (pr *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock { - if pr.Blocks == nil { - return nil - } - - if pr.Blocks[topic] == nil { - return nil - } - - return pr.Blocks[topic][partition] -} - -// Testing API - -func (pr *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) { - if pr.Blocks == nil { - pr.Blocks = make(map[string]map[int32]*ProduceResponseBlock) - } - byTopic, ok := pr.Blocks[topic] - if !ok { - byTopic = make(map[int32]*ProduceResponseBlock) - pr.Blocks[topic] = byTopic - } - byTopic[partition] = &ProduceResponseBlock{Err: err} -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/produce_response_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/produce_response_test.go deleted file mode 100644 index 5c3131af4..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/produce_response_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package sarama - -import "testing" - -var ( - produceResponseNoBlocks = []byte{ - 0x00, 0x00, 0x00, 0x00} - - 
produceResponseManyBlocks = []byte{ - 0x00, 0x00, 0x00, 0x02, - - 0x00, 0x03, 'f', 'o', 'o', - 0x00, 0x00, 0x00, 0x00, - - 0x00, 0x03, 'b', 'a', 'r', - 0x00, 0x00, 0x00, 0x02, - - 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, - - 0x00, 0x00, 0x00, 0x02, - 0x00, 0x02, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} -) - -func TestProduceResponse(t *testing.T) { - response := ProduceResponse{} - - testDecodable(t, "no blocks", &response, produceResponseNoBlocks) - if len(response.Blocks) != 0 { - t.Error("Decoding produced", len(response.Blocks), "topics where there were none") - } - - testDecodable(t, "many blocks", &response, produceResponseManyBlocks) - if len(response.Blocks) != 2 { - t.Error("Decoding produced", len(response.Blocks), "topics where there were 2") - } - if len(response.Blocks["foo"]) != 0 { - t.Error("Decoding produced", len(response.Blocks["foo"]), "partitions for 'foo' where there were none") - } - if len(response.Blocks["bar"]) != 2 { - t.Error("Decoding produced", len(response.Blocks["bar"]), "partitions for 'bar' where there were two") - } - block := response.GetBlock("bar", 1) - if block == nil { - t.Error("Decoding did not produce a block for bar/1") - } else { - if block.Err != ErrNoError { - t.Error("Decoding failed for bar/1/Err, got:", int16(block.Err)) - } - if block.Offset != 0xFF { - t.Error("Decoding failed for bar/1/Offset, got:", block.Offset) - } - } - block = response.GetBlock("bar", 2) - if block == nil { - t.Error("Decoding did not produce a block for bar/2") - } else { - if block.Err != ErrInvalidMessage { - t.Error("Decoding failed for bar/2/Err, got:", int16(block.Err)) - } - if block.Offset != 0 { - t.Error("Decoding failed for bar/2/Offset, got:", block.Offset) - } - } -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/real_decoder.go b/Godeps/_workspace/src/github.com/Shopify/sarama/real_decoder.go deleted file mode 100644 index b194b9bcc..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/real_decoder.go +++ /dev/null @@ -1,225 +0,0 @@ -package sarama - -import ( - "encoding/binary" - "math" -) - -type realDecoder struct { - raw []byte - off int - stack []pushDecoder -} - -// primitives - -func (rd *realDecoder) getInt8() (int8, error) { - if rd.remaining() < 1 { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } - tmp := int8(rd.raw[rd.off]) - rd.off += 1 - return tmp, nil -} - -func (rd *realDecoder) getInt16() (int16, error) { - if rd.remaining() < 2 { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } - tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:])) - rd.off += 2 - return tmp, nil -} - -func (rd *realDecoder) getInt32() (int32, error) { - if rd.remaining() < 4 { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } - tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) - rd.off += 4 - return tmp, nil -} - -func (rd *realDecoder) getInt64() (int64, error) { - if rd.remaining() < 8 { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } - tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) - rd.off += 8 - return tmp, nil -} - -func (rd *realDecoder) getArrayLength() (int, error) { - if rd.remaining() < 4 { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } - tmp := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) - rd.off += 4 - if tmp > rd.remaining() { - rd.off = len(rd.raw) - return -1, ErrInsufficientData - } else if tmp > 2*math.MaxUint16 { - return -1, PacketDecodingError{"invalid array length"} - } - return tmp, nil -} 
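The realDecoder here is the reading half of a two-pass design: prepEncoder (earlier in this diff) only counts bytes, and realEncoder (further below) writes big-endian bytes into a buffer of exactly that size. The helper that ties the two passes together is not part of these hunks, so the following self-contained sketch is illustrative only; the trimmed-down interfaces and the demo type are assumptions made for the example.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// Trimmed-down stand-ins for sarama's internal interfaces; only the two
// methods this demo needs are included (an assumption for illustration).
type packetEncoder interface {
	putInt32(in int32)
	putString(in string) error
}

type encoder interface {
	encode(pe packetEncoder) error
}

// Pass 1: a prep encoder only counts bytes.
type prepEncoder struct{ length int }

func (pe *prepEncoder) putInt32(in int32) { pe.length += 4 }

func (pe *prepEncoder) putString(in string) error {
	pe.length += 2 + len(in) // 2-byte length prefix plus the payload
	return nil
}

// Pass 2: a real encoder writes big-endian bytes into a pre-sized buffer.
type realEncoder struct {
	raw []byte
	off int
}

func (re *realEncoder) putInt32(in int32) {
	binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
	re.off += 4
}

func (re *realEncoder) putString(in string) error {
	binary.BigEndian.PutUint16(re.raw[re.off:], uint16(len(in)))
	re.off += 2
	re.off += copy(re.raw[re.off:], in)
	return nil
}

// encode runs both passes so the output buffer is allocated exactly once.
func encode(e encoder) ([]byte, error) {
	var prep prepEncoder
	if err := e.encode(&prep); err != nil {
		return nil, err
	}
	re := &realEncoder{raw: make([]byte, prep.length)}
	if err := e.encode(re); err != nil {
		return nil, err
	}
	return re.raw, nil
}

// demo encodes an int32 followed by a length-prefixed string.
type demo struct{}

func (demo) encode(pe packetEncoder) error {
	pe.putInt32(42)
	return pe.putString("topic")
}

func main() {
	buf, err := encode(demo{})
	fmt.Println(buf, err) // [0 0 0 42 0 5 116 111 112 105 99] <nil>
}
```

Running the structure through both passes costs one extra traversal, but buys a single exactly-sized allocation per request and moves all length validation ahead of any writes.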
- -// collections - -func (rd *realDecoder) getBytes() ([]byte, error) { - tmp, err := rd.getInt32() - - if err != nil { - return nil, err - } - - n := int(tmp) - - switch { - case n < -1: - return nil, PacketDecodingError{"invalid byteslice length"} - case n == -1: - return nil, nil - case n == 0: - return make([]byte, 0), nil - case n > rd.remaining(): - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - - tmpStr := rd.raw[rd.off : rd.off+n] - rd.off += n - return tmpStr, nil -} - -func (rd *realDecoder) getString() (string, error) { - tmp, err := rd.getInt16() - - if err != nil { - return "", err - } - - n := int(tmp) - - switch { - case n < -1: - return "", PacketDecodingError{"invalid string length"} - case n == -1: - return "", nil - case n == 0: - return "", nil - case n > rd.remaining(): - rd.off = len(rd.raw) - return "", ErrInsufficientData - } - - tmpStr := string(rd.raw[rd.off : rd.off+n]) - rd.off += n - return tmpStr, nil -} - -func (rd *realDecoder) getInt32Array() ([]int32, error) { - if rd.remaining() < 4 { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) - rd.off += 4 - - if rd.remaining() < 4*n { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - - if n == 0 { - return nil, nil - } - - if n < 0 { - return nil, PacketDecodingError{"invalid array length"} - } - - ret := make([]int32, n) - for i := range ret { - ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) - rd.off += 4 - } - return ret, nil -} - -func (rd *realDecoder) getInt64Array() ([]int64, error) { - if rd.remaining() < 4 { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) - rd.off += 4 - - if rd.remaining() < 8*n { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - - if n == 0 { - return nil, nil - } - - if n < 0 { - return nil, PacketDecodingError{"invalid array length"} - } - - ret := make([]int64, n) - for i := range ret { - ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) - rd.off += 8 - } - return ret, nil -} - -// subsets - -func (rd *realDecoder) remaining() int { - return len(rd.raw) - rd.off -} - -func (rd *realDecoder) getSubset(length int) (packetDecoder, error) { - if length > rd.remaining() { - rd.off = len(rd.raw) - return nil, ErrInsufficientData - } - - start := rd.off - rd.off += length - return &realDecoder{raw: rd.raw[start:rd.off]}, nil -} - -// stacks - -func (rd *realDecoder) push(in pushDecoder) error { - in.saveOffset(rd.off) - - reserve := in.reserveLength() - if rd.remaining() < reserve { - rd.off = len(rd.raw) - return ErrInsufficientData - } - - rd.stack = append(rd.stack, in) - - rd.off += reserve - - return nil -} - -func (rd *realDecoder) pop() error { - // this is go's ugly pop pattern (the inverse of append) - in := rd.stack[len(rd.stack)-1] - rd.stack = rd.stack[:len(rd.stack)-1] - - return in.check(rd.off, rd.raw) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/real_encoder.go b/Godeps/_workspace/src/github.com/Shopify/sarama/real_encoder.go deleted file mode 100644 index 947ce98d9..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/real_encoder.go +++ /dev/null @@ -1,100 +0,0 @@ -package sarama - -import "encoding/binary" - -type realEncoder struct { - raw []byte - off int - stack []pushEncoder -} - -// primitives - -func (re *realEncoder) putInt8(in int8) { - re.raw[re.off] = byte(in) - re.off += 1 -} - -func (re *realEncoder) putInt16(in int16) { - 
binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in)) - re.off += 2 -} - -func (re *realEncoder) putInt32(in int32) { - binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in)) - re.off += 4 -} - -func (re *realEncoder) putInt64(in int64) { - binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in)) - re.off += 8 -} - -func (re *realEncoder) putArrayLength(in int) error { - re.putInt32(int32(in)) - return nil -} - -// collection - -func (re *realEncoder) putRawBytes(in []byte) error { - copy(re.raw[re.off:], in) - re.off += len(in) - return nil -} - -func (re *realEncoder) putBytes(in []byte) error { - if in == nil { - re.putInt32(-1) - return nil - } - re.putInt32(int32(len(in))) - copy(re.raw[re.off:], in) - re.off += len(in) - return nil -} - -func (re *realEncoder) putString(in string) error { - re.putInt16(int16(len(in))) - copy(re.raw[re.off:], in) - re.off += len(in) - return nil -} - -func (re *realEncoder) putInt32Array(in []int32) error { - err := re.putArrayLength(len(in)) - if err != nil { - return err - } - for _, val := range in { - re.putInt32(val) - } - return nil -} - -func (re *realEncoder) putInt64Array(in []int64) error { - err := re.putArrayLength(len(in)) - if err != nil { - return err - } - for _, val := range in { - re.putInt64(val) - } - return nil -} - -// stacks - -func (re *realEncoder) push(in pushEncoder) { - in.saveOffset(re.off) - re.off += in.reserveLength() - re.stack = append(re.stack, in) -} - -func (re *realEncoder) pop() error { - // this is go's ugly pop pattern (the inverse of append) - in := re.stack[len(re.stack)-1] - re.stack = re.stack[:len(re.stack)-1] - - return in.run(re.off, re.raw) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/request.go b/Godeps/_workspace/src/github.com/Shopify/sarama/request.go deleted file mode 100644 index d6d5cdfcd..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/request.go +++ /dev/null @@ -1,100 +0,0 @@ -package sarama - -import ( - "encoding/binary" - "fmt" - "io" -) - -type requestBody interface { - encoder - decoder - key() int16 - version() int16 -} - -type request struct { - correlationID int32 - clientID string - body requestBody -} - -func (r *request) encode(pe packetEncoder) (err error) { - pe.push(&lengthField{}) - pe.putInt16(r.body.key()) - pe.putInt16(r.body.version()) - pe.putInt32(r.correlationID) - err = pe.putString(r.clientID) - if err != nil { - return err - } - err = r.body.encode(pe) - if err != nil { - return err - } - return pe.pop() -} - -func (r *request) decode(pd packetDecoder) (err error) { - var key int16 - if key, err = pd.getInt16(); err != nil { - return err - } - var version int16 - if version, err = pd.getInt16(); err != nil { - return err - } - if r.correlationID, err = pd.getInt32(); err != nil { - return err - } - r.clientID, err = pd.getString() - - r.body = allocateBody(key, version) - if r.body == nil { - return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)} - } - return r.body.decode(pd) -} - -func decodeRequest(r io.Reader) (req *request, err error) { - lengthBytes := make([]byte, 4) - if _, err := io.ReadFull(r, lengthBytes); err != nil { - return nil, err - } - - length := int32(binary.BigEndian.Uint32(lengthBytes)) - if length <= 4 || length > MaxRequestSize { - return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)} - } - - encodedReq := make([]byte, length) - if _, err := io.ReadFull(r, encodedReq); err != nil { - return nil, err - } - - req = &request{} - if err := 
decode(encodedReq, req); err != nil { - return nil, err - } - return req, nil -} - -func allocateBody(key, version int16) requestBody { - switch key { - case 0: - return &ProduceRequest{} - case 1: - return &FetchRequest{} - case 2: - return &OffsetRequest{} - case 3: - return &MetadataRequest{} - case 8: - return &OffsetCommitRequest{Version: version} - case 9: - return &OffsetFetchRequest{} - case 10: - return &ConsumerMetadataRequest{} - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/request_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/request_test.go deleted file mode 100644 index 69e8b4cbe..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/request_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package sarama - -import ( - "bytes" - "reflect" - "testing" -) - -type testRequestBody struct { -} - -func (s *testRequestBody) key() int16 { - return 0x666 -} - -func (s *testRequestBody) version() int16 { - return 0xD2 -} - -func (s *testRequestBody) encode(pe packetEncoder) error { - return pe.putString("abc") -} - -// not specific to request tests, just helper functions for testing structures that -// implement the encoder or decoder interfaces that needed somewhere to live - -func testEncodable(t *testing.T, name string, in encoder, expect []byte) { - packet, err := encode(in) - if err != nil { - t.Error(err) - } else if !bytes.Equal(packet, expect) { - t.Error("Encoding", name, "failed\ngot ", packet, "\nwant", expect) - } -} - -func testDecodable(t *testing.T, name string, out decoder, in []byte) { - err := decode(in, out) - if err != nil { - t.Error("Decoding", name, "failed:", err) - } -} - -func testRequest(t *testing.T, name string, rb requestBody, expected []byte) { - // Encoder request - req := &request{correlationID: 123, clientID: "foo", body: rb} - packet, err := encode(req) - headerSize := 14 + len("foo") - if err != nil { - t.Error(err) - } else if !bytes.Equal(packet[headerSize:], expected) { - t.Error("Encoding", name, "failed\ngot ", packet, "\nwant", expected) - } - // Decoder request - decoded, err := decodeRequest(bytes.NewReader(packet)) - if err != nil { - t.Error("Failed to decode request", err) - } else if decoded.correlationID != 123 || decoded.clientID != "foo" { - t.Errorf("Decoded header is not valid: %v", decoded) - } else if !reflect.DeepEqual(rb, decoded.body) { - t.Errorf("Decoded request does not match the encoded one\nencoded: %v\ndecoded: %v", rb, decoded) - } -} - -func testResponse(t *testing.T, name string, res encoder, expected []byte) { - encoded, err := encode(res) - if err != nil { - t.Error(err) - } else if expected != nil && !bytes.Equal(encoded, expected) { - t.Error("Encoding", name, "failed\ngot ", encoded, "\nwant", expected) - } - - decoded := reflect.New(reflect.TypeOf(res).Elem()).Interface().(decoder) - if err := decode(encoded, decoded); err != nil { - t.Error("Decoding", name, "failed:", err) - } - - if !reflect.DeepEqual(decoded, res) { - t.Errorf("Decoded response does not match the encoded one\nencoded: %#v\ndecoded: %#v", res, decoded) - } -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/response_header.go b/Godeps/_workspace/src/github.com/Shopify/sarama/response_header.go deleted file mode 100644 index f3f4d27d6..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/response_header.go +++ /dev/null @@ -1,21 +0,0 @@ -package sarama - -import "fmt" - -type responseHeader struct { - length int32 - correlationID int32 -} - -func (r *responseHeader) decode(pd 
packetDecoder) (err error) { - r.length, err = pd.getInt32() - if err != nil { - return err - } - if r.length <= 4 || r.length > MaxResponseSize { - return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)} - } - - r.correlationID, err = pd.getInt32() - return err -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/response_header_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/response_header_test.go deleted file mode 100644 index 8f9fdb80c..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/response_header_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package sarama - -import "testing" - -var ( - responseHeaderBytes = []byte{ - 0x00, 0x00, 0x0f, 0x00, - 0x0a, 0xbb, 0xcc, 0xff} -) - -func TestResponseHeader(t *testing.T) { - header := responseHeader{} - - testDecodable(t, "response header", &header, responseHeaderBytes) - if header.length != 0xf00 { - t.Error("Decoding header length failed, got", header.length) - } - if header.correlationID != 0x0abbccff { - t.Error("Decoding header correlation id failed, got", header.correlationID) - } -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/sarama.go b/Godeps/_workspace/src/github.com/Shopify/sarama/sarama.go deleted file mode 100644 index d59821750..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/sarama.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Package sarama provides client libraries for the Kafka 0.8 protocol. The AsyncProducer object is the high-level -API for producing messages asynchronously; the SyncProducer provides a blocking API for the same purpose. -The Consumer object is the high-level API for consuming messages. The Client object provides metadata -management functionality that is shared between the higher-level objects. - -For lower-level needs, the Broker and Request/Response objects permit precise control over each connection -and message sent on the wire. - -The Request/Response objects and properties are mostly undocumented, as they line up exactly with the -protocol fields documented by Kafka at https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol -*/ -package sarama - -import ( - "io/ioutil" - "log" -) - -// Logger is the instance of a StdLogger interface that Sarama writes connection -// management events to. By default it is set to discard all log messages via ioutil.Discard, -// but you can set it to redirect wherever you want. -var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags) - -// StdLogger is used to log error messages. -type StdLogger interface { - Print(v ...interface{}) - Printf(format string, v ...interface{}) - Println(v ...interface{}) -} - -// PanicHandler is called for recovering from panics spawned internally to the library (and thus -// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered. -var PanicHandler func(interface{}) - -// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying -// to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned -// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt -// to process. -var MaxRequestSize int32 = 100 * 1024 * 1024 - -// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse.
If -// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to -// protect the client from running out of memory. Please note that brokers do not have any natural limit on -// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers -// (see https://issues.apache.org/jira/browse/KAFKA-2063). -var MaxResponseSize int32 = 100 * 1024 * 1024 diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/snappy.go b/Godeps/_workspace/src/github.com/Shopify/sarama/snappy.go deleted file mode 100644 index e86cb7039..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/snappy.go +++ /dev/null @@ -1,41 +0,0 @@ -package sarama - -import ( - "bytes" - "encoding/binary" - - "github.com/golang/snappy" -) - -var snappyMagic = []byte{130, 83, 78, 65, 80, 80, 89, 0} - -// SnappyEncode encodes binary data -func snappyEncode(src []byte) []byte { - return snappy.Encode(nil, src) -} - -// SnappyDecode decodes snappy data -func snappyDecode(src []byte) ([]byte, error) { - if bytes.Equal(src[:8], snappyMagic) { - var ( - pos = uint32(16) - max = uint32(len(src)) - dst = make([]byte, 0, len(src)) - chunk []byte - err error - ) - for pos < max { - size := binary.BigEndian.Uint32(src[pos : pos+4]) - pos += 4 - - chunk, err = snappy.Decode(chunk, src[pos:pos+size]) - if err != nil { - return nil, err - } - pos += size - dst = append(dst, chunk...) - } - return dst, nil - } - return snappy.Decode(nil, src) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/snappy_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/snappy_test.go deleted file mode 100644 index f3cf7ff5c..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/snappy_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package sarama - -import ( - "bytes" - "testing" -) - -var snappyTestCases = map[string][]byte{ - "REPEATREPEATREPEATREPEATREPEATREPEAT": []byte{36, 20, 82, 69, 80, 69, 65, 84, 118, 6, 0}, - "REALLY SHORT": []byte{12, 44, 82, 69, 65, 76, 76, 89, 32, 83, 72, 79, 82, 84}, - "AXBXCXDXEXFX": []byte{12, 44, 65, 88, 66, 88, 67, 88, 68, 88, 69, 88, 70, 88}, -} - -var snappyStreamTestCases = map[string][]byte{ - "PLAINDATA": []byte{130, 83, 78, 65, 80, 80, 89, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 11, 9, 32, 80, 76, 65, 73, 78, 68, 65, 84, 65}, - `{"a":"UtaitILHMDAAAAfU","b":"日本"}`: []byte{130, 83, 78, 65, 80, 80, 89, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 39, 37, 144, 123, 34, 97, 34, 58, 34, 85, 116, 97, 105, 116, 73, 76, 72, 77, 68, 65, 65, 65, 65, 102, 85, 34, 44, 34, 98, 34, 58, 34, 230, 151, 165, 230, 156, 172, 34, 125}, - `Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur? 
At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias except`: []byte{130, 83, 78, 65, 80, 80, 89, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 3, 89, 128, 8, 240, 90, 83, 101, 100, 32, 117, 116, 32, 112, 101, 114, 115, 112, 105, 99, 105, 97, 116, 105, 115, 32, 117, 110, 100, 101, 32, 111, 109, 110, 105, 115, 32, 105, 115, 116, 101, 32, 110, 97, 116, 117, 115, 32, 101, 114, 114, 111, 114, 32, 115, 105, 116, 32, 118, 111, 108, 117, 112, 116, 97, 116, 101, 109, 32, 97, 99, 99, 117, 115, 97, 110, 116, 105, 117, 109, 32, 100, 111, 108, 111, 114, 101, 109, 113, 117, 101, 32, 108, 97, 117, 100, 97, 5, 22, 240, 60, 44, 32, 116, 111, 116, 97, 109, 32, 114, 101, 109, 32, 97, 112, 101, 114, 105, 97, 109, 44, 32, 101, 97, 113, 117, 101, 32, 105, 112, 115, 97, 32, 113, 117, 97, 101, 32, 97, 98, 32, 105, 108, 108, 111, 32, 105, 110, 118, 101, 110, 116, 111, 114, 101, 32, 118, 101, 114, 105, 116, 97, 1, 141, 4, 101, 116, 1, 36, 88, 115, 105, 32, 97, 114, 99, 104, 105, 116, 101, 99, 116, 111, 32, 98, 101, 97, 116, 97, 101, 32, 118, 105, 1, 6, 120, 100, 105, 99, 116, 97, 32, 115, 117, 110, 116, 32, 101, 120, 112, 108, 105, 99, 97, 98, 111, 46, 32, 78, 101, 109, 111, 32, 101, 110, 105, 109, 5, 103, 0, 109, 46, 180, 0, 12, 113, 117, 105, 97, 17, 16, 0, 115, 5, 209, 72, 97, 115, 112, 101, 114, 110, 97, 116, 117, 114, 32, 97, 117, 116, 32, 111, 100, 105, 116, 5, 9, 36, 102, 117, 103, 105, 116, 44, 32, 115, 101, 100, 9, 53, 32, 99, 111, 110, 115, 101, 113, 117, 117, 110, 1, 42, 20, 109, 97, 103, 110, 105, 32, 9, 245, 16, 115, 32, 101, 111, 115, 1, 36, 28, 32, 114, 97, 116, 105, 111, 110, 101, 17, 96, 33, 36, 1, 51, 36, 105, 32, 110, 101, 115, 99, 105, 117, 110, 116, 1, 155, 1, 254, 16, 112, 111, 114, 114, 111, 1, 51, 36, 115, 113, 117, 97, 109, 32, 101, 115, 116, 44, 1, 14, 13, 81, 5, 183, 4, 117, 109, 1, 18, 0, 97, 9, 19, 4, 32, 115, 1, 149, 12, 109, 101, 116, 44, 9, 135, 76, 99, 116, 101, 116, 117, 114, 44, 32, 97, 100, 105, 112, 105, 115, 99, 105, 32, 118, 101, 108, 50, 173, 0, 24, 110, 111, 110, 32, 110, 117, 109, 9, 94, 84, 105, 117, 115, 32, 109, 111, 100, 105, 32, 116, 101, 109, 112, 111, 114, 97, 32, 105, 110, 99, 105, 100, 33, 52, 20, 117, 116, 32, 108, 97, 98, 33, 116, 4, 101, 116, 9, 106, 0, 101, 5, 219, 20, 97, 109, 32, 97, 108, 105, 5, 62, 33, 164, 8, 114, 97, 116, 29, 212, 12, 46, 32, 85, 116, 41, 94, 52, 97, 100, 32, 109, 105, 110, 105, 109, 97, 32, 118, 101, 110, 105, 33, 221, 72, 113, 117, 105, 115, 32, 110, 111, 115, 116, 114, 117, 109, 32, 101, 120, 101, 114, 99, 105, 33, 202, 104, 111, 110, 101, 109, 32, 117, 108, 108, 97, 109, 32, 99, 111, 114, 112, 111, 114, 105, 115, 32, 115, 117, 115, 99, 105, 112, 105, 13, 130, 8, 105, 111, 115, 1, 64, 12, 110, 105, 115, 105, 1, 150, 5, 126, 44, 105, 100, 32, 101, 120, 32, 101, 97, 32, 99, 111, 109, 5, 192, 0, 99, 41, 131, 33, 172, 8, 63, 32, 81, 1, 107, 4, 97, 117, 33, 101, 96, 118, 101, 108, 32, 101, 117, 109, 32, 105, 117, 114, 101, 32, 114, 101, 112, 114, 101, 104, 101, 110, 100, 101, 114, 105, 65, 63, 12, 105, 32, 105, 110, 1, 69, 16, 118, 111, 108, 117, 112, 65, 185, 1, 47, 24, 105, 116, 32, 101, 115, 115, 101, 1, 222, 64, 109, 32, 110, 105, 104, 105, 108, 32, 109, 111, 108, 101, 115, 116, 105, 97, 101, 46, 103, 0, 0, 44, 1, 45, 16, 32, 105, 108, 108, 117, 37, 143, 45, 36, 0, 109, 5, 110, 65, 33, 20, 97, 116, 32, 113, 117, 111, 17, 92, 44, 115, 32, 110, 117, 108, 108, 97, 32, 112, 97, 114, 105, 9, 165, 24, 65, 116, 32, 118, 101, 114, 111, 69, 34, 44, 101, 116, 
32, 97, 99, 99, 117, 115, 97, 109, 117, 115, 1, 13, 104, 105, 117, 115, 116, 111, 32, 111, 100, 105, 111, 32, 100, 105, 103, 110, 105, 115, 115, 105, 109, 111, 115, 32, 100, 117, 99, 105, 1, 34, 80, 113, 117, 105, 32, 98, 108, 97, 110, 100, 105, 116, 105, 105, 115, 32, 112, 114, 97, 101, 115, 101, 101, 87, 17, 111, 56, 116, 117, 109, 32, 100, 101, 108, 101, 110, 105, 116, 105, 32, 97, 116, 65, 89, 28, 99, 111, 114, 114, 117, 112, 116, 105, 1, 150, 0, 115, 13, 174, 5, 109, 8, 113, 117, 97, 65, 5, 52, 108, 101, 115, 116, 105, 97, 115, 32, 101, 120, 99, 101, 112, 116, 0, 0, 0, 1, 0}, -} - -func TestSnappyEncode(t *testing.T) { - for src, exp := range snappyTestCases { - dst := snappyEncode([]byte(src)) - if !bytes.Equal(dst, exp) { - t.Errorf("Expected %s to generate %v, but was %v", src, exp, dst) - } - } -} - -func TestSnappyDecode(t *testing.T) { - for exp, src := range snappyTestCases { - dst, err := snappyDecode(src) - if err != nil { - t.Error("Encoding error: ", err) - } else if !bytes.Equal(dst, []byte(exp)) { - t.Errorf("Expected %s to be generated from %v, but was %s", exp, src, string(dst)) - } - } -} - -func TestSnappyDecodeStreams(t *testing.T) { - for exp, src := range snappyStreamTestCases { - dst, err := snappyDecode(src) - if err != nil { - t.Error("Encoding error: ", err) - } else if !bytes.Equal(dst, []byte(exp)) { - t.Errorf("Expected %s to be generated from [%d]byte, but was %s", exp, len(src), string(dst)) - } - } -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/sync_producer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/sync_producer.go deleted file mode 100644 index b59d74a20..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/sync_producer.go +++ /dev/null @@ -1,94 +0,0 @@ -package sarama - -import "sync" - -// SyncProducer publishes Kafka messages. It routes messages to the correct broker, refreshing metadata as appropriate, -// and parses responses for errors. You must call Close() on a producer to avoid leaks, it may not be garbage-collected automatically when -// it passes out of scope. -type SyncProducer interface { - - // SendMessage produces a given message, and returns only when it either has succeeded or failed to produce. - // It will return the partition and the offset of the produced message, or an error if the message - // failed to produce. - SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) - - // Close shuts down the producer and flushes any messages it may have buffered. You must call this function before - // a producer object passes out of scope, as it may otherwise leak memory. You must call this before calling Close - // on the underlying client. - Close() error -} - -type syncProducer struct { - producer *asyncProducer - wg sync.WaitGroup -} - -// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration. -func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) { - p, err := NewAsyncProducer(addrs, config) - if err != nil { - return nil, err - } - return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil -} - -// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still -// necessary to call Close() on the underlying client when shutting down this producer. 
-func NewSyncProducerFromClient(client Client) (SyncProducer, error) { - p, err := NewAsyncProducerFromClient(client) - if err != nil { - return nil, err - } - return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil -} - -func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer { - p.conf.Producer.Return.Successes = true - p.conf.Producer.Return.Errors = true - sp := &syncProducer{producer: p} - - sp.wg.Add(2) - go withRecover(sp.handleSuccesses) - go withRecover(sp.handleErrors) - - return sp -} - -func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) { - oldMetadata := msg.Metadata - defer func() { - msg.Metadata = oldMetadata - }() - - expectation := make(chan error, 1) - msg.Metadata = expectation - sp.producer.Input() <- msg - - if err := <-expectation; err != nil { - return -1, -1, err - } else { - return msg.Partition, msg.Offset, nil - } -} - -func (sp *syncProducer) handleSuccesses() { - defer sp.wg.Done() - for msg := range sp.producer.Successes() { - expectation := msg.Metadata.(chan error) - expectation <- nil - } -} - -func (sp *syncProducer) handleErrors() { - defer sp.wg.Done() - for err := range sp.producer.Errors() { - expectation := err.Msg.Metadata.(chan error) - expectation <- err.Err - } -} - -func (sp *syncProducer) Close() error { - sp.producer.AsyncClose() - sp.wg.Wait() - return nil -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/sync_producer_test.go b/Godeps/_workspace/src/github.com/Shopify/sarama/sync_producer_test.go deleted file mode 100644 index d378949b1..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/sync_producer_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package sarama - -import ( - "log" - "sync" - "testing" -) - -func TestSyncProducer(t *testing.T) { - seedBroker := newMockBroker(t, 1) - leader := newMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - for i := 0; i < 10; i++ { - leader.Returns(prodSuccess) - } - - producer, err := NewSyncProducer([]string{seedBroker.Addr()}, nil) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - msg := &ProducerMessage{ - Topic: "my_topic", - Value: StringEncoder(TestMessage), - Metadata: "test", - } - - partition, offset, err := producer.SendMessage(msg) - - if partition != 0 || msg.Partition != partition { - t.Error("Unexpected partition") - } - if offset != 0 || msg.Offset != offset { - t.Error("Unexpected offset") - } - if str, ok := msg.Metadata.(string); !ok || str != "test" { - t.Error("Unexpected metadata") - } - if err != nil { - t.Error(err) - } - } - - safeClose(t, producer) - leader.Close() - seedBroker.Close() -} - -func TestConcurrentSyncProducer(t *testing.T) { - seedBroker := newMockBroker(t, 1) - leader := newMockBroker(t, 2) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) - seedBroker.Returns(metadataResponse) - - prodSuccess := new(ProduceResponse) - prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) - leader.Returns(prodSuccess) - - config := NewConfig() - config.Producer.Flush.Messages = 100 - producer, err := 
NewSyncProducer([]string{seedBroker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - wg := sync.WaitGroup{} - - for i := 0; i < 100; i++ { - wg.Add(1) - go func() { - msg := &ProducerMessage{Topic: "my_topic", Value: StringEncoder(TestMessage)} - partition, _, err := producer.SendMessage(msg) - if partition != 0 { - t.Error("Unexpected partition") - } - if err != nil { - t.Error(err) - } - wg.Done() - }() - } - wg.Wait() - - safeClose(t, producer) - leader.Close() - seedBroker.Close() -} - -func TestSyncProducerToNonExistingTopic(t *testing.T) { - broker := newMockBroker(t, 1) - - metadataResponse := new(MetadataResponse) - metadataResponse.AddBroker(broker.Addr(), broker.BrokerID()) - metadataResponse.AddTopicPartition("my_topic", 0, broker.BrokerID(), nil, nil, ErrNoError) - broker.Returns(metadataResponse) - - config := NewConfig() - config.Metadata.Retry.Max = 0 - config.Producer.Retry.Max = 0 - - producer, err := NewSyncProducer([]string{broker.Addr()}, config) - if err != nil { - t.Fatal(err) - } - - metadataResponse = new(MetadataResponse) - metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition) - broker.Returns(metadataResponse) - - _, _, err = producer.SendMessage(&ProducerMessage{Topic: "unknown"}) - if err != ErrUnknownTopicOrPartition { - t.Error("Expected ErrUnknownTopicOrPartition, found:", err) - } - - safeClose(t, producer) - broker.Close() -} - -// This example shows the basic usage pattern of the SyncProducer. -func ExampleSyncProducer() { - producer, err := NewSyncProducer([]string{"localhost:9092"}, nil) - if err != nil { - log.Fatalln(err) - } - defer func() { - if err := producer.Close(); err != nil { - log.Fatalln(err) - } - }() - - msg := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("testing 123")} - partition, offset, err := producer.SendMessage(msg) - if err != nil { - log.Printf("FAILED to send message: %s\n", err) - } else { - log.Printf("> message sent to partition %d at offset %d\n", partition, offset) - } -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/README.md deleted file mode 100644 index 3464c4ad8..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# Sarama tools - -This folder contains applications that are useful for exploration of your Kafka cluster, or instrumentation. -Some of these tools mirror tools that ship with Kafka, but these tools won't require installing the JVM to function. - -- [kafka-console-producer](./kafka-console-producer): a command line tool to produce a single message to your Kafka cluster. - [kafka-console-partitionconsumer](./kafka-console-partitionconsumer): (deprecated) a command line tool to consume a single partition of a topic on your Kafka cluster. - [kafka-console-consumer](./kafka-console-consumer): a command line tool to consume arbitrary partitions of a topic on your Kafka cluster.
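Stripped of flag parsing and shutdown handling, the consumer tools above reduce to a short loop over a PartitionConsumer's Messages() channel. A minimal sketch against sarama's public API follows; the broker address, topic name, and partition number are placeholder assumptions.

```go
package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// Assumed broker address; adjust for your cluster.
	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, nil)
	if err != nil {
		log.Fatalln(err)
	}
	defer consumer.Close()

	// Consume one partition (topic "test", partition 0, both assumed)
	// starting from the oldest retained offset.
	pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
	if err != nil {
		log.Fatalln(err)
	}
	defer pc.Close()

	// Runs until the partition consumer is closed from another goroutine.
	for msg := range pc.Messages() {
		fmt.Printf("partition=%d offset=%d key=%s value=%s\n",
			msg.Partition, msg.Offset, msg.Key, msg.Value)
	}
}
```

The full tool sources below add flag handling, multi-partition fan-in, and clean shutdown on interrupt.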
- -To install all tools, run `go get github.com/Shopify/sarama/tools/...` diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore deleted file mode 100644 index 67da9dfa9..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -kafka-console-consumer -kafka-console-consumer.test diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md deleted file mode 100644 index 4e77f0b70..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# kafka-console-consumer - -A simple command line tool to consume partitions of a topic and print the -messages on the standard output. - -### Installation - - go get github.com/Shopify/sarama/tools/kafka-console-consumer - -### Usage - - # Minimum invocation - kafka-console-consumer -topic=test -brokers=kafka1:9092 - - # It will pick up a KAFKA_PEERS environment variable - export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092 - kafka-console-consumer -topic=test - - # You can specify the offset you want to start at. It can be either - # `oldest`, `newest`. The default is `newest`. - kafka-console-consumer -topic=test -offset=oldest - kafka-console-consumer -topic=test -offset=newest - - # You can specify the partition(s) you want to consume as a comma-separated - # list. The default is `all`. - kafka-console-consumer -topic=test -partitions=1,2,3 - - # Display all command line options - kafka-console-consumer -help diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go deleted file mode 100644 index 0f1eb89a9..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go +++ /dev/null @@ -1,145 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - "os" - "os/signal" - "strconv" - "strings" - "sync" - - "github.com/Shopify/sarama" -) - -var ( - brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster") - topic = flag.String("topic", "", "REQUIRED: the topic to consume") - partitions = flag.String("partitions", "all", "The partitions to consume, can be 'all' or comma-separated numbers") - offset = flag.String("offset", "newest", "The offset to start with. 
Can be `oldest`, `newest`") - verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging") - bufferSize = flag.Int("buffer-size", 256, "The buffer size of the message channel.") - - logger = log.New(os.Stderr, "", log.LstdFlags) -) - -func main() { - flag.Parse() - - if *brokerList == "" { - printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.") - } - - if *topic == "" { - printUsageErrorAndExit("-topic is required") - } - - if *verbose { - sarama.Logger = logger - } - - var initialOffset int64 - switch *offset { - case "oldest": - initialOffset = sarama.OffsetOldest - case "newest": - initialOffset = sarama.OffsetNewest - default: - printUsageErrorAndExit("-offset should be `oldest` or `newest`") - } - - c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil) - if err != nil { - printErrorAndExit(69, "Failed to start consumer: %s", err) - } - - partitionList, err := getPartitions(c) - if err != nil { - printErrorAndExit(69, "Failed to get the list of partitions: %s", err) - } - - var ( - messages = make(chan *sarama.ConsumerMessage, *bufferSize) - closing = make(chan struct{}) - wg sync.WaitGroup - ) - - go func() { - signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Kill, os.Interrupt) - <-signals - logger.Println("Initiating shutdown of consumer...") - close(closing) - }() - - for _, partition := range partitionList { - pc, err := c.ConsumePartition(*topic, partition, initialOffset) - if err != nil { - printErrorAndExit(69, "Failed to start consumer for partition %d: %s", partition, err) - } - - go func(pc sarama.PartitionConsumer) { - <-closing - pc.AsyncClose() - }(pc) - - wg.Add(1) - go func(pc sarama.PartitionConsumer) { - defer wg.Done() - for message := range pc.Messages() { - messages <- message - } - }(pc) - } - - go func() { - for msg := range messages { - fmt.Printf("Partition:\t%d\n", msg.Partition) - fmt.Printf("Offset:\t%d\n", msg.Offset) - fmt.Printf("Key:\t%s\n", string(msg.Key)) - fmt.Printf("Value:\t%s\n", string(msg.Value)) - fmt.Println() - } - }() - - wg.Wait() - logger.Println("Done consuming topic", *topic) - close(messages) - - if err := c.Close(); err != nil { - logger.Println("Failed to close consumer: ", err) - } -} - -func getPartitions(c sarama.Consumer) ([]int32, error) { - if *partitions == "all" { - return c.Partitions(*topic) - } - - tmp := strings.Split(*partitions, ",") - var pList []int32 - for i := range tmp { - val, err := strconv.ParseInt(tmp[i], 10, 32) - if err != nil { - return nil, err - } - pList = append(pList, int32(val)) - } - - return pList, nil -} - -func printErrorAndExit(code int, format string, values ...interface{}) { - fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) - fmt.Fprintln(os.Stderr) - os.Exit(code) -} - -func printUsageErrorAndExit(format string, values ...interface{}) { - fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) - fmt.Fprintln(os.Stderr) - fmt.Fprintln(os.Stderr, "Available command line options:") - flag.PrintDefaults() - os.Exit(64) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore deleted file mode 100644 index 5837fe8ca..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -kafka-console-partitionconsumer 
-kafka-console-partitionconsumer.test diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md deleted file mode 100644 index 646dd5f5c..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# kafka-console-partitionconsumer - -NOTE: this tool is deprecated in favour of the more general and more powerful -`kafka-console-consumer`. - -A simple command line tool to consume a partition of a topic and print the messages -on the standard output. - -### Installation - - go get github.com/Shopify/sarama/tools/kafka-console-partitionconsumer - -### Usage - - # Minimum invocation - kafka-console-partitionconsumer -topic=test -partition=4 -brokers=kafka1:9092 - - # It will pick up a KAFKA_PEERS environment variable - export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092 - kafka-console-partitionconsumer -topic=test -partition=4 - - # You can specify the offset you want to start at. It can be either - # `oldest`, `newest`, or a specific offset number - kafka-console-partitionconsumer -topic=test -partition=3 -offset=oldest - kafka-console-partitionconsumer -topic=test -partition=2 -offset=1337 - - # Display all command line options - kafka-console-partitionconsumer -help diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go deleted file mode 100644 index d5e4464de..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go +++ /dev/null @@ -1,102 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - "os" - "os/signal" - "strconv" - "strings" - - "github.com/Shopify/sarama" -) - -var ( - brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster") - topic = flag.String("topic", "", "REQUIRED: the topic to consume") - partition = flag.Int("partition", -1, "REQUIRED: the partition to consume") - offset = flag.String("offset", "newest", "The offset to start with. 
Can be `oldest`, `newest`, or an actual offset") - verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging") - - logger = log.New(os.Stderr, "", log.LstdFlags) -) - -func main() { - flag.Parse() - - if *brokerList == "" { - printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.") - } - - if *topic == "" { - printUsageErrorAndExit("-topic is required") - } - - if *partition == -1 { - printUsageErrorAndExit("-partition is required") - } - - if *verbose { - sarama.Logger = logger - } - - var ( - initialOffset int64 - offsetError error - ) - switch *offset { - case "oldest": - initialOffset = sarama.OffsetOldest - case "newest": - initialOffset = sarama.OffsetNewest - default: - initialOffset, offsetError = strconv.ParseInt(*offset, 10, 64) - } - - if offsetError != nil { - printUsageErrorAndExit("Invalid initial offset: %s", *offset) - } - - c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil) - if err != nil { - printErrorAndExit(69, "Failed to start consumer: %s", err) - } - - pc, err := c.ConsumePartition(*topic, int32(*partition), initialOffset) - if err != nil { - printErrorAndExit(69, "Failed to start partition consumer: %s", err) - } - - go func() { - signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Kill, os.Interrupt) - <-signals - pc.AsyncClose() - }() - - for msg := range pc.Messages() { - fmt.Printf("Offset:\t%d\n", msg.Offset) - fmt.Printf("Key:\t%s\n", string(msg.Key)) - fmt.Printf("Value:\t%s\n", string(msg.Value)) - fmt.Println() - } - - if err := c.Close(); err != nil { - logger.Println("Failed to close consumer: ", err) - } -} - -func printErrorAndExit(code int, format string, values ...interface{}) { - fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) - fmt.Fprintln(os.Stderr) - os.Exit(code) -} - -func printUsageErrorAndExit(format string, values ...interface{}) { - fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) - fmt.Fprintln(os.Stderr) - fmt.Fprintln(os.Stderr, "Available command line options:") - flag.PrintDefaults() - os.Exit(64) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore deleted file mode 100644 index 2b9e563a1..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -kafka-console-producer -kafka-console-producer.test diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/README.md b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/README.md deleted file mode 100644 index 6b3a65f21..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/README.md +++ /dev/null @@ -1,34 +0,0 @@ -# kafka-console-producer - -A simple command line tool to produce a single message to Kafka. 
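For comparison with the command line invocations that follow, the same single-message produce can be sketched directly against sarama's SyncProducer. The broker address and topic are placeholder assumptions; the WaitForAll acks and hash partitioner mirror the tool's own defaults.

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	// Mirror the tool's defaults: wait for all in-sync replicas to ack,
	// and hash-partition by key when a key is given.
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Partitioner = sarama.NewHashPartitioner

	// Assumed broker address.
	producer, err := sarama.NewSyncProducer([]string{"kafka1:9092"}, config)
	if err != nil {
		log.Fatalln(err)
	}
	defer producer.Close()

	msg := &sarama.ProducerMessage{
		Topic: "test",
		Key:   sarama.StringEncoder("key"),
		Value: sarama.StringEncoder("value"),
	}
	partition, offset, err := producer.SendMessage(msg)
	if err != nil {
		log.Fatalln("failed to produce message:", err)
	}
	log.Printf("delivered to partition %d at offset %d", partition, offset)
}
```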
- -### Installation - - go get github.com/Shopify/sarama/tools/kafka-console-producer - - -### Usage - - # Minimum invocation - kafka-console-producer -topic=test -value=value -brokers=kafka1:9092 - - # It will pick up a KAFKA_PEERS environment variable - export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092 - kafka-console-producer -topic=test -value=value - - # It will read the value from stdin by using pipes - echo "hello world" | kafka-console-producer -topic=test - - # Specify a key: - echo "hello world" | kafka-console-producer -topic=test -key=key - - # Partitioning: by default, kafka-console-producer will partition as follows: - # - manual partitioning if a -partition is provided - # - hash partitioning by key if a -key is provided - # - random partitioning otherwise. - # - # You can override this using the -partitioner argument: - echo "hello world" | kafka-console-producer -topic=test -key=key -partitioner=random - - # Display all command line options - kafka-console-producer -help diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go b/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go deleted file mode 100644 index 6a1765d7c..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go +++ /dev/null @@ -1,118 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "io/ioutil" - "log" - "os" - "strings" - - "github.com/Shopify/sarama" -) - -var ( - brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster. You can also set the KAFKA_PEERS environment variable") - topic = flag.String("topic", "", "REQUIRED: the topic to produce to") - key = flag.String("key", "", "The key of the message to produce. Can be empty.") - value = flag.String("value", "", "REQUIRED: the value of the message to produce. You can also provide the value on stdin.") - partitioner = flag.String("partitioner", "", "The partitioning scheme to use. Can be `hash`, `manual`, or `random`") - partition = flag.Int("partition", -1, "The partition to produce to.") - verbose = flag.Bool("verbose", false, "Turn on sarama logging to stderr") - silent = flag.Bool("silent", false, "Turn off printing the message's topic, partition, and offset to stdout") - - logger = log.New(os.Stderr, "", log.LstdFlags) -) - -func main() { - flag.Parse() - - if *brokerList == "" { - printUsageErrorAndExit("no -brokers specified.
Alternatively, set the KAFKA_PEERS environment variable") - } - - if *topic == "" { - printUsageErrorAndExit("no -topic specified") - } - - if *verbose { - sarama.Logger = logger - } - - config := sarama.NewConfig() - config.Producer.RequiredAcks = sarama.WaitForAll - - switch *partitioner { - case "": - if *partition >= 0 { - config.Producer.Partitioner = sarama.NewManualPartitioner - } else { - config.Producer.Partitioner = sarama.NewHashPartitioner - } - case "hash": - config.Producer.Partitioner = sarama.NewHashPartitioner - case "random": - config.Producer.Partitioner = sarama.NewRandomPartitioner - case "manual": - config.Producer.Partitioner = sarama.NewManualPartitioner - if *partition == -1 { - printUsageErrorAndExit("-partition is required when partitioning manually") - } - default: - printUsageErrorAndExit(fmt.Sprintf("Partitioner %s not supported.", *partitioner)) - } - - message := &sarama.ProducerMessage{Topic: *topic, Partition: int32(*partition)} - - if *key != "" { - message.Key = sarama.StringEncoder(*key) - } - - if *value != "" { - message.Value = sarama.StringEncoder(*value) - } else if stdinAvailable() { - bytes, err := ioutil.ReadAll(os.Stdin) - if err != nil { - printErrorAndExit(66, "Failed to read data from the standard input: %s", err) - } - message.Value = sarama.ByteEncoder(bytes) - } else { - printUsageErrorAndExit("-value is required, or you have to provide the value on stdin") - } - - producer, err := sarama.NewSyncProducer(strings.Split(*brokerList, ","), config) - if err != nil { - printErrorAndExit(69, "Failed to open Kafka producer: %s", err) - } - defer func() { - if err := producer.Close(); err != nil { - logger.Println("Failed to close Kafka producer cleanly:", err) - } - }() - - partition, offset, err := producer.SendMessage(message) - if err != nil { - printErrorAndExit(69, "Failed to produce message: %s", err) - } else if !*silent { - fmt.Printf("topic=%s\tpartition=%d\toffset=%d\n", *topic, partition, offset) - } -} - -func printErrorAndExit(code int, format string, values ...interface{}) { - fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...)) - fmt.Fprintln(os.Stderr) - os.Exit(code) -} - -func printUsageErrorAndExit(message string) { - fmt.Fprintln(os.Stderr, "ERROR:", message) - fmt.Fprintln(os.Stderr) - fmt.Fprintln(os.Stderr, "Available command line options:") - flag.PrintDefaults() - os.Exit(64) -} - -func stdinAvailable() bool { - stat, _ := os.Stdin.Stat() - return (stat.Mode() & os.ModeCharDevice) == 0 -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/utils.go b/Godeps/_workspace/src/github.com/Shopify/sarama/utils.go deleted file mode 100644 index fef7c7381..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/utils.go +++ /dev/null @@ -1,89 +0,0 @@ -package sarama - -import "sort" - -type none struct{} - -// make []int32 sortable so we can sort partition numbers -type int32Slice []int32 - -func (slice int32Slice) Len() int { - return len(slice) -} - -func (slice int32Slice) Less(i, j int) bool { - return slice[i] < slice[j] -} - -func (slice int32Slice) Swap(i, j int) { - slice[i], slice[j] = slice[j], slice[i] -} - -func dupeAndSort(input []int32) []int32 { - ret := make([]int32, 0, len(input)) - for _, val := range input { - ret = append(ret, val) - } - - sort.Sort(int32Slice(ret)) - return ret -} - -func withRecover(fn func()) { - defer func() { - handler := PanicHandler - if handler != nil { - if err := recover(); err != nil { - handler(err) - } - } - }() - - fn() -} - -func 
safeAsyncClose(b *Broker) { - tmp := b // local var prevents clobbering in goroutine - go withRecover(func() { - if connected, _ := tmp.Connected(); connected { - if err := tmp.Close(); err != nil { - Logger.Println("Error closing broker", tmp.ID(), ":", err) - } - } - }) -} - -// Encoder is a simple interface for any type that can be encoded as an array of bytes -// in order to be sent as the key or value of a Kafka message. Length() is provided as an -// optimization, and must return the same as len() on the result of Encode(). -type Encoder interface { - Encode() ([]byte, error) - Length() int -} - -// make strings and byte slices encodable for convenience so they can be used as keys -// and/or values in kafka messages - -// StringEncoder implements the Encoder interface for Go strings so that they can be used -// as the Key or Value in a ProducerMessage. -type StringEncoder string - -func (s StringEncoder) Encode() ([]byte, error) { - return []byte(s), nil -} - -func (s StringEncoder) Length() int { - return len(s) -} - -// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used -// as the Key or Value in a ProducerMessage. -type ByteEncoder []byte - -func (b ByteEncoder) Encode() ([]byte, error) { - return b, nil -} - -func (b ByteEncoder) Length() int { - return len(b) -} diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/boot_cluster.sh b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/boot_cluster.sh deleted file mode 100644 index 95e47dde4..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/boot_cluster.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh - -set -ex - -# Launch and wait for toxiproxy -${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh & -while ! nc -q 1 localhost 2181 ${KAFKA_INSTALL_ROOT}/zookeeper-${ZK_PORT}/myid -done diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/kafka.conf b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/kafka.conf deleted file mode 100644 index d975de438..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/kafka.conf +++ /dev/null @@ -1,5 +0,0 @@ -start on started zookeeper-ZK_PORT -stop on stopping zookeeper-ZK_PORT - -pre-start exec sleep 2 -exec /opt/kafka-KAFKAID/bin/kafka-server-start.sh /opt/kafka-KAFKAID/config/server.properties diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/provision.sh b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/provision.sh deleted file mode 100644 index 0a2d77785..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/provision.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/sh - -set -ex - -apt-get update -yes | apt-get install default-jre - -export KAFKA_INSTALL_ROOT=/opt -export KAFKA_HOSTNAME=192.168.100.67 -export KAFKA_VERSION=0.8.2.1 -export REPOSITORY_ROOT=/vagrant - -sh /vagrant/vagrant/install_cluster.sh -sh /vagrant/vagrant/setup_services.sh -sh /vagrant/vagrant/create_topics.sh diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh deleted file mode 100644 index e52c00e7b..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh - -set -ex - -${KAFKA_INSTALL_ROOT}/toxiproxy -port 8474 -host 0.0.0.0 & -PID=$! - -while ! 
nc -q 1 localhost 8474 - -# The number of threads handling network requests -num.network.threads=2 - -# The number of threads doing disk I/O -num.io.threads=8 - -# The send buffer (SO_SNDBUF) used by the socket server -socket.send.buffer.bytes=1048576 - -# The receive buffer (SO_RCVBUF) used by the socket server -socket.receive.buffer.bytes=1048576 - -# The maximum size of a request that the socket server will accept (protection against OOM) -socket.request.max.bytes=104857600 - - -############################# Log Basics ############################# - -# A comma separated list of directories under which to store log files -log.dirs=KAFKA_DATADIR - -# The default number of log partitions per topic. More partitions allow greater -# parallelism for consumption, but this will also result in more files across -# the brokers. -num.partitions=2 - -# Create new topics with a replication factor of 2 so failover can be tested -# more easily. -default.replication.factor=2 - -auto.create.topics.enable=false -delete.topic.enable=true - -############################# Log Flush Policy ############################# - -# Messages are immediately written to the filesystem but by default we only fsync() to sync -# the OS cache lazily. The following configurations control the flush of data to disk. -# There are a few important trade-offs here: -# 1. Durability: Unflushed data may be lost if you are not using replication. -# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. -# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. -# The settings below allow one to configure the flush policy to flush data after a period of time or -# every N messages (or both). This can be done globally and overridden on a per-topic basis. - -# The number of messages to accept before forcing a flush of data to disk -#log.flush.interval.messages=10000 - -# The maximum amount of time a message can sit in a log before we force a flush -#log.flush.interval.ms=1000 - -############################# Log Retention Policy ############################# - -# The following configurations control the disposal of log segments. The policy can -# be set to delete segments after a period of time, or after a given size has accumulated. -# A segment will be deleted whenever *either* of these criteria is met. Deletion always happens -# from the end of the log. - -# The minimum age of a log file to be eligible for deletion -log.retention.hours=168 - -# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining -# segments don't drop below log.retention.bytes. -log.retention.bytes=268435456 - -# The maximum size of a log segment file. When this size is reached a new log segment will be created. -log.segment.bytes=268435456 - -# The interval at which log segments are checked to see if they can be deleted according -# to the retention policies -log.retention.check.interval.ms=60000 - -# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires. -# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction. -log.cleaner.enable=false - -############################# Zookeeper ############################# - -# Zookeeper connection string (see zookeeper docs for details). 
-# This is a comma-separated list of host:port pairs, each corresponding to a zk -# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". -# You can also append an optional chroot string to the urls to specify the -# root directory for all kafka znodes. -zookeeper.connect=localhost:ZK_PORT - -# Timeout in ms for connecting to zookeeper -zookeeper.session.timeout.ms=3000 -zookeeper.connection.timeout.ms=3000 diff --git a/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/setup_services.sh b/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/setup_services.sh deleted file mode 100644 index 81d8ea05d..000000000 --- a/Godeps/_workspace/src/github.com/Shopify/sarama/vagrant/setup_services.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/sh - -set -ex - -stop toxiproxy || true -cp ${REPOSITORY_ROOT}/vagrant/toxiproxy.conf /etc/init/toxiproxy.conf -cp ${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh ${KAFKA_INSTALL_ROOT}/ -start toxiproxy - -for i in 1 2 3 4 5; do - ZK_PORT=`expr $i + 2180` - KAFKA_PORT=`expr $i + 9090` - - stop zookeeper-${ZK_PORT} || true - - # set up zk service - cp ${REPOSITORY_ROOT}/vagrant/zookeeper.conf /etc/init/zookeeper-${ZK_PORT}.conf - sed -i s/KAFKAID/${KAFKA_PORT}/g /etc/init/zookeeper-${ZK_PORT}.conf - - # set up kafka service - cp ${REPOSITORY_ROOT}/vagrant/kafka.conf /etc/init/kafka-${KAFKA_PORT}.conf - sed -i s/KAFKAID/${KAFKA_PORT}/g /etc/init/kafka-${KAFKA_PORT}.conf - sed -i s/ZK_PORT/${ZK_PORT}/g /etc/init/kafka-${KAFKA_PORT}.conf - - start zookeeper-${ZK_PORT} -done - -# Wait for the last kafka node to finish booting -while ! nc -q 1 localhost 29095  [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc] - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not -yet stable (pre 1.0). Logrus itself is completely stable and has been used in -many large deployments. 
The core API is unlikely to change much but please -pin your Logrus version so you aren't fetching latest `master` on -every build.** - -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by Logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.Formatter = &log.TextFormatter{}` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text -time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -exit status 1 -``` - -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/Sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely API-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` -and you'll now have the flexibility of Logrus. You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) - - // Output to stderr instead of stdout, could also be a file. - log.SetOutput(os.Stderr) - - // Only log the warning severity or above. 
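- // Entries at InfoLevel or DebugLevel will be discarded.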
- log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") - - // A common pattern is to re-use fields between logging statements by re-using - // the logrus.Entry returned from WithFields() - contextLogger := log.WithFields(log.Fields{ - "common": "this is a common field", - "other": "I also should be logged always", - }) - - contextLogger.Info("I'll be logged with common and other field") - contextLogger.Info("Me too") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "os" - - "github.com/Sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stderr - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging through logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus, using any of the `printf`-family functions should be -seen as a hint that you should add a field; however, you can still use the -`printf`-family functions with Logrus. - -#### Hooks - -You can add hooks for logging levels. For example, to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, send info to StatsD, or log to -multiple places simultaneously, e.g. syslog. - -Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in -`init`: - -```go -import ( - log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" - logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" - "log/syslog" -) - -func init() { - log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) - - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - if err != nil { - log.Error("Unable to connect to local syslog daemon") - } else { - log.AddHook(hook) - } -} -``` - - -| Hook | Description | -| ----- | ----------- | -| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. 
| -| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to a remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [Sentry](https://github.com/Sirupsen/logrus/blob/master/hooks/sentry/sentry.go) | Send errors to the Sentry error logging and aggregation service. | -| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | -| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | -| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | -| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) | -| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | -| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | -| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | -| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | -| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to Rollbar | -| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | - -#### Level logging - -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. - -```go -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`; it will then only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -#### Entries - -Besides the fields added with `WithField` or `WithFields`, some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `WithFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment. - -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. For example, if your application has a global -variable `Environment`, which is a string representation of the environment, you -could do: - -```go -import ( - log "github.com/Sirupsen/logrus" -) - -func init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(&log.JSONFormatter{}) - } else { - // The TextFormatter is default, you don't actually have to do this. 
- log.SetFormatter(&log.TextFormatter{}) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. - -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. - * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY set the - `DisableColors` field to `true` -* `logrus.JSONFormatter`. Logs fields as JSON. -* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net). - - ```go - logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: "application_name"}) - ``` - -Third party logging formatters: - -* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. - -You can define your own formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see Entries section above): - -```go -type MyJSONFormatter struct { -} - -log.SetFormatter(new(MyJSONFormatter)) - -func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` on information about those fields or read the - // source of the official loggers. - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} -``` - -#### Logger as an `io.Writer` - -Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. - -```go -w := logger.Writer() -defer w.Close() - -srv := http.Server{ - // create a stdlib log.Logger that writes to - // logrus.Logger. - ErrorLog: log.New(w, "", 0), -} -``` - -Each line written to that writer will be printed the usual way, using formatters -and hooks. The level for those entries is `info`. - -#### Rotation - -Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotate(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger. - - -[godoc]: https://godoc.org/github.com/Sirupsen/logrus diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go deleted file mode 100644 index 04673a075..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go +++ /dev/null @@ -1,256 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "io" - "os" - "time" -) - -// An entry is the final or intermediate Logrus logging entry. It contains all -// the fields passed with WithField{,s}. It's finally logged when Debug, Info, -// Warn, Error, Fatal or Panic is called on it. These objects can be reused and -// passed around as much as you wish to avoid field duplication. -type Entry struct { - Logger *Logger - - // Contains all the fields set by the user. 
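- // WithField and WithFields copy these fields into a new Entry rather than mutating the map in place.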
- Data Fields - - // Time at which the log entry was created - Time time.Time - - // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic - Level Level - - // Message passed to Debug, Info, Warn, Error, Fatal or Panic - Message string -} - -func NewEntry(logger *Logger) *Entry { - return &Entry{ - Logger: logger, - // Default is three fields, give a little extra room - Data: make(Fields, 5), - } -} - -// Returns a reader for the entry, which is a proxy to the formatter. -func (entry *Entry) Reader() (*bytes.Buffer, error) { - serialized, err := entry.Logger.Formatter.Format(entry) - return bytes.NewBuffer(serialized), err -} - -// Returns the string representation from the reader and ultimately the -// formatter. -func (entry *Entry) String() (string, error) { - reader, err := entry.Reader() - if err != nil { - return "", err - } - - return reader.String(), err -} - -// Add a single field to the Entry. -func (entry *Entry) WithField(key string, value interface{}) *Entry { - return entry.WithFields(Fields{key: value}) -} - -// Add a map of fields to the Entry. -func (entry *Entry) WithFields(fields Fields) *Entry { - data := Fields{} - for k, v := range entry.Data { - data[k] = v - } - for k, v := range fields { - data[k] = v - } - return &Entry{Logger: entry.Logger, Data: data} -} - -// This function is not declared with a pointer value because otherwise -// race conditions will occur when using multiple goroutines -func (entry Entry) log(level Level, msg string) { - entry.Time = time.Now() - entry.Level = level - entry.Message = msg - - if err := entry.Logger.Hooks.Fire(level, &entry); err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) - entry.Logger.mu.Unlock() - } - - reader, err := entry.Reader() - if err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - entry.Logger.mu.Unlock() - } - - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - - _, err = io.Copy(entry.Logger.Out, reader) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } - - // To avoid Entry#log() returning a value that only would make sense for - // panic() to use in Entry#Panic(), we avoid the allocation by checking - // directly here. - if level <= PanicLevel { - panic(&entry) - } -} - -func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.log(DebugLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Print(args ...interface{}) { - entry.Info(args...) -} - -func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.log(InfoLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.log(WarnLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warning(args ...interface{}) { - entry.Warn(args...) 
-} - -func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.log(ErrorLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.log(FatalLevel, fmt.Sprint(args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.log(PanicLevel, fmt.Sprint(args...)) - } - panic(fmt.Sprint(args...)) -} - -// Entry Printf family functions - -func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Printf(format string, args ...interface{}) { - entry.Infof(format, args...) -} - -func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Warningf(format string, args ...interface{}) { - entry.Warnf(format, args...) -} - -func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(fmt.Sprintf(format, args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(fmt.Sprintf(format, args...)) - } -} - -// Entry Println family functions - -func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Println(args ...interface{}) { - entry.Infoln(args...) -} - -func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Warningln(args ...interface{}) { - entry.Warnln(args...) -} - -func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(entry.sprintlnn(args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(entry.sprintlnn(args...)) - } -} - -// Sprintlnn => Sprint no newline. This is to get the behavior of -// fmt.Sprintln, where spaces are always added between operands regardless of -// their type. Instead of vendoring the Sprintln implementation to spare a -// string allocation, we do the simplest thing. -func (entry *Entry) sprintlnn(args ...interface{}) string { - msg := fmt.Sprintln(args...) 
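- // Slice off the trailing newline that Sprintln always appends.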
- return msg[:len(msg)-1] -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go deleted file mode 100644 index 98717df49..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestEntryPanicln(t *testing.T) { - errBoom := fmt.Errorf("boom time") - - defer func() { - p := recover() - assert.NotNil(t, p) - - switch pVal := p.(type) { - case *Entry: - assert.Equal(t, "kaboom", pVal.Message) - assert.Equal(t, errBoom, pVal.Data["err"]) - default: - t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) - } - }() - - logger := New() - logger.Out = &bytes.Buffer{} - entry := NewEntry(logger) - entry.WithField("err", errBoom).Panicln("kaboom") -} - -func TestEntryPanicf(t *testing.T) { - errBoom := fmt.Errorf("boom again") - - defer func() { - p := recover() - assert.NotNil(t, p) - - switch pVal := p.(type) { - case *Entry: - assert.Equal(t, "kaboom true", pVal.Message) - assert.Equal(t, errBoom, pVal.Data["err"]) - default: - t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) - } - }() - - logger := New() - logger.Out = &bytes.Buffer{} - entry := NewEntry(logger) - entry.WithField("err", errBoom).Panicf("kaboom %v", true) -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go deleted file mode 100644 index a1623ec00..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go +++ /dev/null @@ -1,50 +0,0 @@ -package main - -import ( - "github.com/Sirupsen/logrus" -) - -var log = logrus.New() - -func init() { - log.Formatter = new(logrus.JSONFormatter) - log.Formatter = new(logrus.TextFormatter) // default - log.Level = logrus.DebugLevel -} - -func main() { - defer func() { - err := recover() - if err != nil { - log.WithFields(logrus.Fields{ - "omg": true, - "err": err, - "number": 100, - }).Fatal("The ice breaks!") - } - }() - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "number": 8, - }).Debug("Started observing beach") - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(logrus.Fields{ - "temperature": -4, - }).Debug("Temperature changes") - - log.WithFields(logrus.Fields{ - "animal": "orca", - "size": 9009, - }).Panic("It's over 9000!") -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go deleted file mode 100644 index cb5759a35..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go +++ /dev/null @@ -1,30 +0,0 @@ -package main - -import ( - "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" -) - -var log = logrus.New() - -func init() { - log.Formatter = new(logrus.TextFormatter) // default - log.Hooks.Add(airbrake.NewHook("https://example.com", "xyz", "development")) -} - -func main() { - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") 
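- - // The Fatal entry below also fires the Airbrake hook before the process exits.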
- - log.WithFields(logrus.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go deleted file mode 100644 index a67e1b802..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go +++ /dev/null @@ -1,188 +0,0 @@ -package logrus - -import ( - "io" -) - -var ( - // std is the name of the standard logger in stdlib `log` - std = New() -) - -func StandardLogger() *Logger { - return std -} - -// SetOutput sets the standard logger output. -func SetOutput(out io.Writer) { - std.mu.Lock() - defer std.mu.Unlock() - std.Out = out -} - -// SetFormatter sets the standard logger formatter. -func SetFormatter(formatter Formatter) { - std.mu.Lock() - defer std.mu.Unlock() - std.Formatter = formatter -} - -// SetLevel sets the standard logger level. -func SetLevel(level Level) { - std.mu.Lock() - defer std.mu.Unlock() - std.Level = level -} - -// GetLevel returns the standard logger level. -func GetLevel() Level { - std.mu.Lock() - defer std.mu.Unlock() - return std.Level -} - -// AddHook adds a hook to the standard logger hooks. -func AddHook(hook Hook) { - std.mu.Lock() - defer std.mu.Unlock() - std.Hooks.Add(hook) -} - -// WithField creates an entry from the standard logger and adds a field to -// it. If you want multiple fields, use `WithFields`. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithField(key string, value interface{}) *Entry { - return std.WithField(key, value) -} - -// WithFields creates an entry from the standard logger and adds multiple -// fields to it. This is simply a helper for `WithField`, invoking it -// once for each field. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithFields(fields Fields) *Entry { - return std.WithFields(fields) -} - -// Debug logs a message at level Debug on the standard logger. -func Debug(args ...interface{}) { - std.Debug(args...) -} - -// Print logs a message at level Info on the standard logger. -func Print(args ...interface{}) { - std.Print(args...) -} - -// Info logs a message at level Info on the standard logger. -func Info(args ...interface{}) { - std.Info(args...) -} - -// Warn logs a message at level Warn on the standard logger. -func Warn(args ...interface{}) { - std.Warn(args...) -} - -// Warning logs a message at level Warn on the standard logger. -func Warning(args ...interface{}) { - std.Warning(args...) -} - -// Error logs a message at level Error on the standard logger. -func Error(args ...interface{}) { - std.Error(args...) -} - -// Panic logs a message at level Panic on the standard logger. -func Panic(args ...interface{}) { - std.Panic(args...) -} - -// Fatal logs a message at level Fatal on the standard logger. -func Fatal(args ...interface{}) { - std.Fatal(args...) -} - -// Debugf logs a message at level Debug on the standard logger. -func Debugf(format string, args ...interface{}) { - std.Debugf(format, args...) -} - -// Printf logs a message at level Info on the standard logger. -func Printf(format string, args ...interface{}) { - std.Printf(format, args...) -} - -// Infof logs a message at level Info on the standard logger. -func Infof(format string, args ...interface{}) { - std.Infof(format, args...) -} - -// Warnf logs a message at level Warn on the standard logger. 
-func Warnf(format string, args ...interface{}) { - std.Warnf(format, args...) -} - -// Warningf logs a message at level Warn on the standard logger. -func Warningf(format string, args ...interface{}) { - std.Warningf(format, args...) -} - -// Errorf logs a message at level Error on the standard logger. -func Errorf(format string, args ...interface{}) { - std.Errorf(format, args...) -} - -// Panicf logs a message at level Panic on the standard logger. -func Panicf(format string, args ...interface{}) { - std.Panicf(format, args...) -} - -// Fatalf logs a message at level Fatal on the standard logger. -func Fatalf(format string, args ...interface{}) { - std.Fatalf(format, args...) -} - -// Debugln logs a message at level Debug on the standard logger. -func Debugln(args ...interface{}) { - std.Debugln(args...) -} - -// Println logs a message at level Info on the standard logger. -func Println(args ...interface{}) { - std.Println(args...) -} - -// Infoln logs a message at level Info on the standard logger. -func Infoln(args ...interface{}) { - std.Infoln(args...) -} - -// Warnln logs a message at level Warn on the standard logger. -func Warnln(args ...interface{}) { - std.Warnln(args...) -} - -// Warningln logs a message at level Warn on the standard logger. -func Warningln(args ...interface{}) { - std.Warningln(args...) -} - -// Errorln logs a message at level Error on the standard logger. -func Errorln(args ...interface{}) { - std.Errorln(args...) -} - -// Panicln logs a message at level Panic on the standard logger. -func Panicln(args ...interface{}) { - std.Panicln(args...) -} - -// Fatalln logs a message at level Fatal on the standard logger. -func Fatalln(args ...interface{}) { - std.Fatalln(args...) -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go deleted file mode 100644 index 104d689f1..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go +++ /dev/null @@ -1,48 +0,0 @@ -package logrus - -import "time" - -const DefaultTimestampFormat = time.RFC3339 - -// The Formatter interface is used to implement a custom Formatter. It takes an -// `Entry`. It exposes all the fields, including the default ones: -// -// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. -// * `entry.Data["time"]`. The timestamp. -// * `entry.Data["level"]`. The level the entry was logged at. -// -// Any additional fields added with `WithField` or `WithFields` are also in -// `entry.Data`. Format is expected to return an array of bytes which are then -// logged to `logger.Out`. -type Formatter interface { - Format(*Entry) ([]byte, error) -} - -// This is to avoid silently overwriting `time`, `msg` and `level` fields when -// dumping an entry. If this code weren't there, doing -// -// logrus.WithField("level", 1).Info("hello") -// -// would just silently drop the user-provided level. Instead, with this code, -// it'll be logged as: -// -// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} -// -// It's not exported because it's still using Data in an opinionated way. It's to -// avoid code duplication between the two default formatters. 
-func prefixFieldClashes(data Fields) { - _, ok := data["time"] - if ok { - data["fields.time"] = data["time"] - } - - _, ok = data["msg"] - if ok { - data["fields.msg"] = data["msg"] - } - - _, ok = data["level"] - if ok { - data["fields.level"] = data["level"] - } -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go deleted file mode 100644 index c6d290c77..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package logrus - -import ( - "fmt" - "testing" - "time" -) - -// smallFields is a small size data set for benchmarking -var smallFields = Fields{ - "foo": "bar", - "baz": "qux", - "one": "two", - "three": "four", -} - -// largeFields is a large size data set for benchmarking -var largeFields = Fields{ - "foo": "bar", - "baz": "qux", - "one": "two", - "three": "four", - "five": "six", - "seven": "eight", - "nine": "ten", - "eleven": "twelve", - "thirteen": "fourteen", - "fifteen": "sixteen", - "seventeen": "eighteen", - "nineteen": "twenty", - "a": "b", - "c": "d", - "e": "f", - "g": "h", - "i": "j", - "k": "l", - "m": "n", - "o": "p", - "q": "r", - "s": "t", - "u": "v", - "w": "x", - "y": "z", - "this": "will", - "make": "thirty", - "entries": "yeah", -} - -var errorFields = Fields{ - "foo": fmt.Errorf("bar"), - "baz": fmt.Errorf("qux"), -} - -func BenchmarkErrorTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields) -} - -func BenchmarkSmallTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) -} - -func BenchmarkLargeTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields) -} - -func BenchmarkSmallColoredTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields) -} - -func BenchmarkLargeColoredTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields) -} - -func BenchmarkSmallJSONFormatter(b *testing.B) { - doBenchmark(b, &JSONFormatter{}, smallFields) -} - -func BenchmarkLargeJSONFormatter(b *testing.B) { - doBenchmark(b, &JSONFormatter{}, largeFields) -} - -func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { - entry := &Entry{ - Time: time.Time{}, - Level: InfoLevel, - Message: "message", - Data: fields, - } - var d []byte - var err error - for i := 0; i < b.N; i++ { - d, err = formatter.Format(entry) - if err != nil { - b.Fatal(err) - } - b.SetBytes(int64(len(d))) - } -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go deleted file mode 100644 index 8ea93ddf2..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go +++ /dev/null @@ -1,56 +0,0 @@ -package logstash - -import ( - "encoding/json" - "fmt" - - "github.com/Sirupsen/logrus" -) - -// Formatter generates json in logstash format. -// Logstash site: http://logstash.net/ -type LogstashFormatter struct { - Type string // if not empty use for logstash type field. - - // TimestampFormat sets the format used for timestamps. 
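- // Defaults to logrus.DefaultTimestampFormat (time.RFC3339) when left empty.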
- TimestampFormat string -} - -func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) { - entry.Data["@version"] = 1 - - if f.TimestampFormat == "" { - f.TimestampFormat = logrus.DefaultTimestampFormat - } - - entry.Data["@timestamp"] = entry.Time.Format(f.TimestampFormat) - - // set message field - v, ok := entry.Data["message"] - if ok { - entry.Data["fields.message"] = v - } - entry.Data["message"] = entry.Message - - // set level field - v, ok = entry.Data["level"] - if ok { - entry.Data["fields.level"] = v - } - entry.Data["level"] = entry.Level.String() - - // set type field - if f.Type != "" { - v, ok = entry.Data["type"] - if ok { - entry.Data["fields.type"] = v - } - entry.Data["type"] = f.Type - } - - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go deleted file mode 100644 index d8814a0ea..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package logstash - -import ( - "bytes" - "encoding/json" - "github.com/Sirupsen/logrus" - "github.com/stretchr/testify/assert" - "testing" -) - -func TestLogstashFormatter(t *testing.T) { - assert := assert.New(t) - - lf := LogstashFormatter{Type: "abc"} - - fields := logrus.Fields{ - "message": "def", - "level": "ijk", - "type": "lmn", - "one": 1, - "pi": 3.14, - "bool": true, - } - - entry := logrus.WithFields(fields) - entry.Message = "msg" - entry.Level = logrus.InfoLevel - - b, _ := lf.Format(entry) - - var data map[string]interface{} - dec := json.NewDecoder(bytes.NewReader(b)) - dec.UseNumber() - dec.Decode(&data) - - // base fields - assert.Equal(json.Number("1"), data["@version"]) - assert.NotEmpty(data["@timestamp"]) - assert.Equal("abc", data["type"]) - assert.Equal("msg", data["message"]) - assert.Equal("info", data["level"]) - - // substituted fields - assert.Equal("def", data["fields.message"]) - assert.Equal("ijk", data["fields.level"]) - assert.Equal("lmn", data["fields.type"]) - - // formats - assert.Equal(json.Number("1"), data["one"]) - assert.Equal(json.Number("3.14"), data["pi"]) - assert.Equal(true, data["bool"]) -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go deleted file mode 100644 index 13f34cb6f..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package logrus - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -type TestHook struct { - Fired bool -} - -func (hook *TestHook) Fire(entry *Entry) error { - hook.Fired = true - return nil -} - -func (hook *TestHook) Levels() []Level { - return []Level{ - DebugLevel, - InfoLevel, - WarnLevel, - ErrorLevel, - FatalLevel, - PanicLevel, - } -} - -func TestHookFires(t *testing.T) { - hook := new(TestHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - assert.Equal(t, hook.Fired, false) - - log.Print("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, true) - }) -} - -type ModifyHook struct { -} - -func (hook *ModifyHook) Fire(entry *Entry) error { - entry.Data["wow"] = "whale" - return nil -} - -func (hook *ModifyHook) Levels() []Level { - return []Level{ - 
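// Fire on every level so the hook observes all entries in the tests. -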
DebugLevel, - InfoLevel, - WarnLevel, - ErrorLevel, - FatalLevel, - PanicLevel, - } -} - -func TestHookCanModifyEntry(t *testing.T) { - hook := new(ModifyHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.WithField("wow", "elephant").Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["wow"], "whale") - }) -} - -func TestCanFireMultipleHooks(t *testing.T) { - hook1 := new(ModifyHook) - hook2 := new(TestHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook1) - log.Hooks.Add(hook2) - - log.WithField("wow", "elephant").Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["wow"], "whale") - assert.Equal(t, hook2.Fired, true) - }) -} - -type ErrorHook struct { - Fired bool -} - -func (hook *ErrorHook) Fire(entry *Entry) error { - hook.Fired = true - return nil -} - -func (hook *ErrorHook) Levels() []Level { - return []Level{ - ErrorLevel, - } -} - -func TestErrorHookShouldntFireOnInfo(t *testing.T) { - hook := new(ErrorHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.Info("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, false) - }) -} - -func TestErrorHookShouldFireOnError(t *testing.T) { - hook := new(ErrorHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.Error("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, true) - }) -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go deleted file mode 100644 index 3f151cdc3..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go +++ /dev/null @@ -1,34 +0,0 @@ -package logrus - -// A hook to be fired when logging on the logging levels returned from -// `Levels()` on your implementation of the interface. Note that this is not -// fired in a goroutine or a channel with workers, you should handle such -// functionality yourself if your call is non-blocking and you don't wish for -// the logging calls for levels returned from `Levels()` to block. -type Hook interface { - Levels() []Level - Fire(*Entry) error -} - -// Internal type for storing the hooks on a logger instance. -type LevelHooks map[Level][]Hook - -// Add a hook to an instance of logger. This is called with -// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. -func (hooks LevelHooks) Add(hook Hook) { - for _, level := range hook.Levels() { - hooks[level] = append(hooks[level], hook) - } -} - -// Fire all the hooks for the passed level. Used by `entry.log` to fire -// appropriate hooks for a log entry. -func (hooks LevelHooks) Fire(level Level, entry *Entry) error { - for _, hook := range hooks[level] { - if err := hook.Fire(entry); err != nil { - return err - } - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go deleted file mode 100644 index b0502c335..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go +++ /dev/null @@ -1,54 +0,0 @@ -package airbrake - -import ( - "errors" - "fmt" - - "github.com/Sirupsen/logrus" - "github.com/tobi/airbrake-go" -) - -// AirbrakeHook to send exceptions to an exception-tracking service compatible -// with the Airbrake API. 
-type airbrakeHook struct { - APIKey string - Endpoint string - Environment string -} - -func NewHook(endpoint, apiKey, env string) *airbrakeHook { - return &airbrakeHook{ - APIKey: apiKey, - Endpoint: endpoint, - Environment: env, - } -} - -func (hook *airbrakeHook) Fire(entry *logrus.Entry) error { - airbrake.ApiKey = hook.APIKey - airbrake.Endpoint = hook.Endpoint - airbrake.Environment = hook.Environment - - var notifyErr error - err, ok := entry.Data["error"].(error) - if ok { - notifyErr = err - } else { - notifyErr = errors.New(entry.Message) - } - - airErr := airbrake.Notify(notifyErr) - if airErr != nil { - return fmt.Errorf("Failed to send error to Airbrake: %s", airErr) - } - - return nil -} - -func (hook *airbrakeHook) Levels() []logrus.Level { - return []logrus.Level{ - logrus.ErrorLevel, - logrus.FatalLevel, - logrus.PanicLevel, - } -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go deleted file mode 100644 index 058a91e34..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package airbrake - -import ( - "encoding/xml" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/Sirupsen/logrus" -) - -type notice struct { - Error NoticeError `xml:"error"` -} -type NoticeError struct { - Class string `xml:"class"` - Message string `xml:"message"` -} - -type customErr struct { - msg string -} - -func (e *customErr) Error() string { - return e.msg -} - -const ( - testAPIKey = "abcxyz" - testEnv = "development" - expectedClass = "*airbrake.customErr" - expectedMsg = "foo" - unintendedMsg = "Airbrake will not see this string" -) - -var ( - noticeError = make(chan NoticeError, 1) -) - -// TestLogEntryMessageReceived checks whether invoking Logrus' log.Error -// method causes an XML payload containing the log entry message to be received -// by an HTTP server emulating an Airbrake-compatible endpoint. -func TestLogEntryMessageReceived(t *testing.T) { - log := logrus.New() - ts := startAirbrakeServer(t) - defer ts.Close() - - hook := NewHook(ts.URL, testAPIKey, "production") - log.Hooks.Add(hook) - - log.Error(expectedMsg) - - select { - case received := <-noticeError: - if received.Message != expectedMsg { - t.Errorf("Unexpected message received: %s", received.Message) - } - case <-time.After(time.Second): - t.Error("Timed out; no notice received by Airbrake API") - } -} - -// TestLogEntryWithErrorReceived confirms that, when passing an error type using -// logrus.Fields, an HTTP server emulating an Airbrake endpoint receives the -// error message returned by the Error() method on the error interface -// rather than the logrus.Entry.Message string. 
-func TestLogEntryWithErrorReceived(t *testing.T) { - log := logrus.New() - ts := startAirbrakeServer(t) - defer ts.Close() - - hook := NewHook(ts.URL, testAPIKey, "production") - log.Hooks.Add(hook) - - log.WithFields(logrus.Fields{ - "error": &customErr{expectedMsg}, - }).Error(unintendedMsg) - - select { - case received := <-noticeError: - if received.Message != expectedMsg { - t.Errorf("Unexpected message received: %s", received.Message) - } - if received.Class != expectedClass { - t.Errorf("Unexpected error class: %s", received.Class) - } - case <-time.After(time.Second): - t.Error("Timed out; no notice received by Airbrake API") - } -} - -// TestLogEntryWithNonErrorTypeNotReceived confirms that, when passing a -// non-error type using logrus.Fields, a HTTP server emulating an Airbrake -// endpoint receives the logrus.Entry.Message string. -// -// Only error types are supported when setting the 'error' field using -// logrus.WithFields(). -func TestLogEntryWithNonErrorTypeNotReceived(t *testing.T) { - log := logrus.New() - ts := startAirbrakeServer(t) - defer ts.Close() - - hook := NewHook(ts.URL, testAPIKey, "production") - log.Hooks.Add(hook) - - log.WithFields(logrus.Fields{ - "error": expectedMsg, - }).Error(unintendedMsg) - - select { - case received := <-noticeError: - if received.Message != unintendedMsg { - t.Errorf("Unexpected message received: %s", received.Message) - } - case <-time.After(time.Second): - t.Error("Timed out; no notice received by Airbrake API") - } -} - -func startAirbrakeServer(t *testing.T) *httptest.Server { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var notice notice - if err := xml.NewDecoder(r.Body).Decode(¬ice); err != nil { - t.Error(err) - } - r.Body.Close() - - noticeError <- notice.Error - })) - - return ts -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go deleted file mode 100644 index d20a0f54a..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go +++ /dev/null @@ -1,68 +0,0 @@ -package logrus_bugsnag - -import ( - "errors" - - "github.com/Sirupsen/logrus" - "github.com/bugsnag/bugsnag-go" -) - -type bugsnagHook struct{} - -// ErrBugsnagUnconfigured is returned if NewBugsnagHook is called before -// bugsnag.Configure. Bugsnag must be configured before the hook. -var ErrBugsnagUnconfigured = errors.New("bugsnag must be configured before installing this logrus hook") - -// ErrBugsnagSendFailed indicates that the hook failed to submit an error to -// bugsnag. The error was successfully generated, but `bugsnag.Notify()` -// failed. -type ErrBugsnagSendFailed struct { - err error -} - -func (e ErrBugsnagSendFailed) Error() string { - return "failed to send error to Bugsnag: " + e.err.Error() -} - -// NewBugsnagHook initializes a logrus hook which sends exceptions to an -// exception-tracking service compatible with the Bugsnag API. Before using -// this hook, you must call bugsnag.Configure(). The returned object should be -// registered with a log via `AddHook()` -// -// Entries that trigger an Error, Fatal or Panic should now include an "error" -// field to send to Bugsnag. -func NewBugsnagHook() (*bugsnagHook, error) { - if bugsnag.Config.APIKey == "" { - return nil, ErrBugsnagUnconfigured - } - return &bugsnagHook{}, nil -} - -// Fire forwards an error to Bugsnag. 
Given a logrus.Entry, it extracts the -// "error" field (or the Message if the error isn't present) and sends it off. -func (hook *bugsnagHook) Fire(entry *logrus.Entry) error { - var notifyErr error - err, ok := entry.Data["error"].(error) - if ok { - notifyErr = err - } else { - notifyErr = errors.New(entry.Message) - } - - bugsnagErr := bugsnag.Notify(notifyErr) - if bugsnagErr != nil { - return ErrBugsnagSendFailed{bugsnagErr} - } - - return nil -} - -// Levels enumerates the log levels on which the error should be forwarded to -// bugsnag: everything at or above the "Error" level. -func (hook *bugsnagHook) Levels() []logrus.Level { - return []logrus.Level{ - logrus.ErrorLevel, - logrus.FatalLevel, - logrus.PanicLevel, - } -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go deleted file mode 100644 index e9ea298d8..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package logrus_bugsnag - -import ( - "encoding/json" - "errors" - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/Sirupsen/logrus" - "github.com/bugsnag/bugsnag-go" -) - -type notice struct { - Events []struct { - Exceptions []struct { - Message string `json:"message"` - } `json:"exceptions"` - } `json:"events"` -} - -func TestNoticeReceived(t *testing.T) { - msg := make(chan string, 1) - expectedMsg := "foo" - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var notice notice - data, _ := ioutil.ReadAll(r.Body) - if err := json.Unmarshal(data, ¬ice); err != nil { - t.Error(err) - } - _ = r.Body.Close() - - msg <- notice.Events[0].Exceptions[0].Message - })) - defer ts.Close() - - hook := &bugsnagHook{} - - bugsnag.Configure(bugsnag.Configuration{ - Endpoint: ts.URL, - ReleaseStage: "production", - APIKey: "12345678901234567890123456789012", - Synchronous: true, - }) - - log := logrus.New() - log.Hooks.Add(hook) - - log.WithFields(logrus.Fields{ - "error": errors.New(expectedMsg), - }).Error("Bugsnag will not see this string") - - select { - case received := <-msg: - if received != expectedMsg { - t.Errorf("Unexpected message received: %s", received) - } - case <-time.After(time.Second): - t.Error("Timed out; no notice received by Bugsnag API") - } -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md deleted file mode 100644 index ae61e9229..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# Papertrail Hook for Logrus :walrus: - -[Papertrail](https://papertrailapp.com) provides hosted log management. Once stored in Papertrail, you can [group](http://help.papertrailapp.com/kb/how-it-works/groups/) your logs on various dimensions, [search](http://help.papertrailapp.com/kb/how-it-works/search-syntax) them, and trigger [alerts](http://help.papertrailapp.com/kb/how-it-works/alerts). - -In most deployments, you'll want to send logs to Papertrail via their [remote_syslog](http://help.papertrailapp.com/kb/configuration/configuring-centralized-logging-from-text-log-files-in-unix/) daemon, which requires no application-specific configuration. 
This hook is intended for relatively low-volume logging, likely in managed cloud hosting deployments where installing `remote_syslog` is not possible. - -## Usage - -You can find your Papertrail UDP port on your [Papertrail account page](https://papertrailapp.com/account/destinations). Substitute it below for `YOUR_PAPERTRAIL_UDP_PORT`. - -For `YOUR_APP_NAME`, substitute a short string that will readily identify your application or service in the logs. - -```go -import ( - "log/syslog" - "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/papertrail" -) - -func main() { - log := logrus.New() - hook, err := logrus_papertrail.NewPapertrailHook("logs.papertrailapp.com", YOUR_PAPERTRAIL_UDP_PORT, YOUR_APP_NAME) - - if err == nil { - log.Hooks.Add(hook) - } -} -``` diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go deleted file mode 100644 index c0f10c1bd..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go +++ /dev/null @@ -1,55 +0,0 @@ -package logrus_papertrail - -import ( - "fmt" - "net" - "os" - "time" - - "github.com/Sirupsen/logrus" -) - -const ( - format = "Jan 2 15:04:05" -) - -// PapertrailHook to send logs to a logging service compatible with the Papertrail API. -type PapertrailHook struct { - Host string - Port int - AppName string - UDPConn net.Conn -} - -// NewPapertrailHook creates a hook to be added to an instance of logger. -func NewPapertrailHook(host string, port int, appName string) (*PapertrailHook, error) { - conn, err := net.Dial("udp", fmt.Sprintf("%s:%d", host, port)) - return &PapertrailHook{host, port, appName, conn}, err -} - -// Fire is called when a log event is fired. -func (hook *PapertrailHook) Fire(entry *logrus.Entry) error { - date := time.Now().Format(format) - msg, _ := entry.String() - payload := fmt.Sprintf("<22> %s %s: %s", date, hook.AppName, msg) - - bytesWritten, err := hook.UDPConn.Write([]byte(payload)) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to send log line to Papertrail via UDP. Wrote %d bytes before error: %v", bytesWritten, err) - return err - } - - return nil -} - -// Levels returns the available logging levels. 
-func (hook *PapertrailHook) Levels() []logrus.Level { - return []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - logrus.ErrorLevel, - logrus.WarnLevel, - logrus.InfoLevel, - logrus.DebugLevel, - } -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go deleted file mode 100644 index 96318d003..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package logrus_papertrail - -import ( - "fmt" - "testing" - - "github.com/Sirupsen/logrus" - "github.com/stvp/go-udp-testing" -) - -func TestWritingToUDP(t *testing.T) { - port := 16661 - udp.SetAddr(fmt.Sprintf(":%d", port)) - - hook, err := NewPapertrailHook("localhost", port, "test") - if err != nil { - t.Errorf("Unable to connect to local UDP server.") - } - - log := logrus.New() - log.Hooks.Add(hook) - - udp.ShouldReceive(t, "foo", func() { - log.Info("foo") - }) -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md deleted file mode 100644 index 31de6540a..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md +++ /dev/null @@ -1,111 +0,0 @@ -# Sentry Hook for Logrus :walrus: - -[Sentry](https://getsentry.com) provides both self-hosted and hosted -solutions for exception tracking. -Both client and server are -[open source](https://github.com/getsentry/sentry). - -## Usage - -Every sentry application defined on the server gets a different -[DSN](https://www.getsentry.com/docs/). In the example below replace -`YOUR_DSN` with the one created for your application. - -```go -import ( - "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/sentry" -) - -func main() { - log := logrus.New() - hook, err := logrus_sentry.NewSentryHook(YOUR_DSN, []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - logrus.ErrorLevel, - }) - - if err == nil { - log.Hooks.Add(hook) - } -} -``` - -If you wish to initialize a SentryHook with tags, you can use the `NewWithTagsSentryHook` constructor to provide default tags: - -```go -tags := map[string]string{ - "site": "example.com", -} -levels := []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - logrus.ErrorLevel, -} -hook, err := logrus_sentry.NewWithTagsSentryHook(YOUR_DSN, tags, levels) -``` - -If you wish to initialize a SentryHook with an already initialized raven client, you can use -the `NewWithClientSentryHook` constructor: - -```go -import ( - "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/sentry" - "github.com/getsentry/raven-go" -) - -func main() { - log := logrus.New() - - client, err := raven.New(YOUR_DSN) - if err != nil { - log.Fatal(err) - } - - hook, err := logrus_sentry.NewWithClientSentryHook(client, []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - logrus.ErrorLevel, - }) - - if err == nil { - log.Hooks.Add(hook) - } -} -``` - -## Special fields - -Some logrus fields have a special meaning in this hook; -these are `server_name`, `logger` and `http_request`. -When logs are sent to sentry, these fields are treated differently.
-- `server_name` (also known as hostname) is the name of the server which -is logging the event (hostname.example.com) -- `logger` is the part of the application which is logging the event. -In Go this usually means setting it to the name of the package. -- `http_request` is the incoming request (*http.Request). The detailed request data is sent to Sentry. - -## Timeout - -`Timeout` is the time the sentry hook will wait for a response -from the sentry server. - -If this time elapses with no response from -the server, an error is returned. - -If `Timeout` is set to 0, the SentryHook will not wait for a reply -and will assume correct delivery. - -The SentryHook has a default timeout of `100 milliseconds` when created -with a call to `NewSentryHook`. This can be changed by assigning a value to the `Timeout` field: - -```go -hook, _ := logrus_sentry.NewSentryHook(...) -hook.Timeout = 20 * time.Second -``` diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go deleted file mode 100644 index cf88098a8..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go +++ /dev/null @@ -1,137 +0,0 @@ -package logrus_sentry - -import ( - "fmt" - "net/http" - "time" - - "github.com/Sirupsen/logrus" - "github.com/getsentry/raven-go" -) - -var ( - severityMap = map[logrus.Level]raven.Severity{ - logrus.DebugLevel: raven.DEBUG, - logrus.InfoLevel: raven.INFO, - logrus.WarnLevel: raven.WARNING, - logrus.ErrorLevel: raven.ERROR, - logrus.FatalLevel: raven.FATAL, - logrus.PanicLevel: raven.FATAL, - } -) - -func getAndDel(d logrus.Fields, key string) (string, bool) { - var ( - ok bool - v interface{} - val string - ) - if v, ok = d[key]; !ok { - return "", false - } - - if val, ok = v.(string); !ok { - return "", false - } - delete(d, key) - return val, true -} - -func getAndDelRequest(d logrus.Fields, key string) (*http.Request, bool) { - var ( - ok bool - v interface{} - req *http.Request - ) - if v, ok = d[key]; !ok { - return nil, false - } - if req, ok = v.(*http.Request); !ok || req == nil { - return nil, false - } - delete(d, key) - return req, true -} - -// SentryHook delivers logs to a sentry server. -type SentryHook struct { - // Timeout sets the time to wait for a delivery error from the sentry server. - // If this is set to zero the hook will not wait for any response and will - // consider the message correctly sent. - Timeout time.Duration - - client *raven.Client - levels []logrus.Level -} - -// NewSentryHook creates a hook to be added to an instance of logger -// and initializes the raven client. -// This method sets the timeout to 100 milliseconds. -func NewSentryHook(DSN string, levels []logrus.Level) (*SentryHook, error) { - client, err := raven.New(DSN) - if err != nil { - return nil, err - } - return &SentryHook{100 * time.Millisecond, client, levels}, nil -} - -// NewWithTagsSentryHook creates a hook with tags to be added to an instance -// of logger and initializes the raven client. This method sets the timeout to -// 100 milliseconds. -func NewWithTagsSentryHook(DSN string, tags map[string]string, levels []logrus.Level) (*SentryHook, error) { - client, err := raven.NewWithTags(DSN, tags) - if err != nil { - return nil, err - } - return &SentryHook{100 * time.Millisecond, client, levels}, nil -} - -// NewWithClientSentryHook creates a hook using an initialized raven client. -// This method sets the timeout to 100 milliseconds.
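Before the constructor that follows, a consolidated sketch of the special fields and the `Timeout` behaviour documented above (the DSN, URL, and field values are illustrative placeholders):

```go
import (
	"net/http"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/Sirupsen/logrus/hooks/sentry"
)

func main() {
	log := logrus.New()
	hook, err := logrus_sentry.NewSentryHook(YOUR_DSN, []logrus.Level{logrus.ErrorLevel})
	if err != nil {
		log.Fatal(err)
	}
	hook.Timeout = 2 * time.Second // wait up to 2s for Sentry's reply instead of the 100ms default
	log.Hooks.Add(hook)

	req, _ := http.NewRequest("GET", "https://example.com/checkout", nil)
	log.WithFields(logrus.Fields{
		"server_name":  "web-1.example.com", // becomes the packet's ServerName
		"logger":       "checkout",          // becomes the packet's Logger
		"http_request": req,                 // attached as the Sentry HTTP interface
	}).Error("payment failed")
}
```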
-func NewWithClientSentryHook(client *raven.Client, levels []logrus.Level) (*SentryHook, error) { - return &SentryHook{100 * time.Millisecond, client, levels}, nil -} - -// Called when an event should be sent to sentry -// Special fields that sentry uses to give more information to the server -// are extracted from entry.Data (if they are found) -// These fields are: logger, server_name and http_request -func (hook *SentryHook) Fire(entry *logrus.Entry) error { - packet := &raven.Packet{ - Message: entry.Message, - Timestamp: raven.Timestamp(entry.Time), - Level: severityMap[entry.Level], - Platform: "go", - } - - d := entry.Data - - if logger, ok := getAndDel(d, "logger"); ok { - packet.Logger = logger - } - if serverName, ok := getAndDel(d, "server_name"); ok { - packet.ServerName = serverName - } - if req, ok := getAndDelRequest(d, "http_request"); ok { - packet.Interfaces = append(packet.Interfaces, raven.NewHttp(req)) - } - packet.Extra = map[string]interface{}(d) - - _, errCh := hook.client.Capture(packet, nil) - timeout := hook.Timeout - if timeout != 0 { - timeoutCh := time.After(timeout) - select { - case err := <-errCh: - return err - case <-timeoutCh: - return fmt.Errorf("no response from sentry server in %s", timeout) - } - } - return nil -} - -// Levels returns the available logging levels. -func (hook *SentryHook) Levels() []logrus.Level { - return hook.levels -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go deleted file mode 100644 index 4a97bc63e..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go +++ /dev/null @@ -1,154 +0,0 @@ -package logrus_sentry - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "reflect" - "strings" - "testing" - - "github.com/Sirupsen/logrus" - "github.com/getsentry/raven-go" -) - -const ( - message = "error message" - server_name = "testserver.internal" - logger_name = "test.logger" -) - -func getTestLogger() *logrus.Logger { - l := logrus.New() - l.Out = ioutil.Discard - return l -} - -func WithTestDSN(t *testing.T, tf func(string, <-chan *raven.Packet)) { - pch := make(chan *raven.Packet, 1) - s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - defer req.Body.Close() - d := json.NewDecoder(req.Body) - p := &raven.Packet{} - err := d.Decode(p) - if err != nil { - t.Fatal(err.Error()) - } - - pch <- p - })) - defer s.Close() - - fragments := strings.SplitN(s.URL, "://", 2) - dsn := fmt.Sprintf( - "%s://public:secret@%s/sentry/project-id", - fragments[0], - fragments[1], - ) - tf(dsn, pch) -} - -func TestSpecialFields(t *testing.T) { - WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) { - logger := getTestLogger() - - hook, err := NewSentryHook(dsn, []logrus.Level{ - logrus.ErrorLevel, - }) - - if err != nil { - t.Fatal(err.Error()) - } - logger.Hooks.Add(hook) - - req, _ := http.NewRequest("GET", "url", nil) - logger.WithFields(logrus.Fields{ - "server_name": server_name, - "logger": logger_name, - "http_request": req, - }).Error(message) - - packet := <-pch - if packet.Logger != logger_name { - t.Errorf("logger should have been %s, was %s", logger_name, packet.Logger) - } - - if packet.ServerName != server_name { - t.Errorf("server_name should have been %s, was %s", server_name, packet.ServerName) - } - }) -} - -func TestSentryHandler(t *testing.T) { - WithTestDSN(t, func(dsn string, pch <-chan 
*raven.Packet) { - logger := getTestLogger() - hook, err := NewSentryHook(dsn, []logrus.Level{ - logrus.ErrorLevel, - }) - if err != nil { - t.Fatal(err.Error()) - } - logger.Hooks.Add(hook) - - logger.Error(message) - packet := <-pch - if packet.Message != message { - t.Errorf("message should have been %s, was %s", message, packet.Message) - } - }) -} - -func TestSentryWithClient(t *testing.T) { - WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) { - logger := getTestLogger() - - client, _ := raven.New(dsn) - - hook, err := NewWithClientSentryHook(client, []logrus.Level{ - logrus.ErrorLevel, - }) - if err != nil { - t.Fatal(err.Error()) - } - logger.Hooks.Add(hook) - - logger.Error(message) - packet := <-pch - if packet.Message != message { - t.Errorf("message should have been %s, was %s", message, packet.Message) - } - }) -} - -func TestSentryTags(t *testing.T) { - WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) { - logger := getTestLogger() - tags := map[string]string{ - "site": "test", - } - levels := []logrus.Level{ - logrus.ErrorLevel, - } - - hook, err := NewWithTagsSentryHook(dsn, tags, levels) - if err != nil { - t.Fatal(err.Error()) - } - - logger.Hooks.Add(hook) - - logger.Error(message) - packet := <-pch - expected := raven.Tags{ - raven.Tag{ - Key: "site", - Value: "test", - }, - } - if !reflect.DeepEqual(packet.Tags, expected) { - t.Errorf("message should have been %s, was %s", message, packet.Message) - } - }) -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md deleted file mode 100644 index 4dbb8e729..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# Syslog Hooks for Logrus :walrus: - -## Usage - -```go -import ( - "log/syslog" - "github.com/Sirupsen/logrus" - logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" -) - -func main() { - log := logrus.New() - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - - if err == nil { - log.Hooks.Add(hook) - } -} -``` diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go deleted file mode 100644 index b6fa37462..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go +++ /dev/null @@ -1,59 +0,0 @@ -package logrus_syslog - -import ( - "fmt" - "github.com/Sirupsen/logrus" - "log/syslog" - "os" -) - -// SyslogHook to send logs via syslog. -type SyslogHook struct { - Writer *syslog.Writer - SyslogNetwork string - SyslogRaddr string -} - -// Creates a hook to be added to an instance of logger. 
This is called with -// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")` -// `if err == nil { log.Hooks.Add(hook) }` -func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) { - w, err := syslog.Dial(network, raddr, priority, tag) - return &SyslogHook{w, network, raddr}, err -} - -func (hook *SyslogHook) Fire(entry *logrus.Entry) error { - line, err := entry.String() - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err) - return err - } - - switch entry.Level { - case logrus.PanicLevel: - return hook.Writer.Crit(line) - case logrus.FatalLevel: - return hook.Writer.Crit(line) - case logrus.ErrorLevel: - return hook.Writer.Err(line) - case logrus.WarnLevel: - return hook.Writer.Warning(line) - case logrus.InfoLevel: - return hook.Writer.Info(line) - case logrus.DebugLevel: - return hook.Writer.Debug(line) - default: - return nil - } -} - -func (hook *SyslogHook) Levels() []logrus.Level { - return []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - logrus.ErrorLevel, - logrus.WarnLevel, - logrus.InfoLevel, - logrus.DebugLevel, - } -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go deleted file mode 100644 index 42762dc10..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package logrus_syslog - -import ( - "github.com/Sirupsen/logrus" - "log/syslog" - "testing" -) - -func TestLocalhostAddAndPrint(t *testing.T) { - log := logrus.New() - hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - - if err != nil { - t.Errorf("Unable to connect to local syslog.") - } - - log.Hooks.Add(hook) - - for _, level := range hook.Levels() { - if len(log.Hooks[level]) != 1 { - t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level])) - } - } - - log.Info("Congratulations!") -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go deleted file mode 100644 index 2ad6dc5cf..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go +++ /dev/null @@ -1,41 +0,0 @@ -package logrus - -import ( - "encoding/json" - "fmt" -) - -type JSONFormatter struct { - // TimestampFormat sets the format used for marshaling timestamps. 
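- // When left empty, Format below falls back to DefaultTimestampFormat.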
- TimestampFormat string -} - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+3) - for k, v := range entry.Data { - switch v := v.(type) { - case error: - // Otherwise errors are ignored by `encoding/json` - // https://github.com/Sirupsen/logrus/issues/137 - data[k] = v.Error() - default: - data[k] = v - } - } - prefixFieldClashes(data) - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat - } - - data["time"] = entry.Time.Format(timestampFormat) - data["msg"] = entry.Message - data["level"] = entry.Level.String() - - serialized, err := json.Marshal(data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go deleted file mode 100644 index 1d7087325..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package logrus - -import ( - "encoding/json" - "errors" - - "testing" -) - -func TestErrorNotLost(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("error", errors.New("wild walrus"))) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["error"] != "wild walrus" { - t.Fatal("Error field not set") - } -} - -func TestErrorNotLostOnFieldNotNamedError(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("omg", errors.New("wild walrus"))) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["omg"] != "wild walrus" { - t.Fatal("Error field not set") - } -} - -func TestFieldClashWithTime(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("time", "right now!")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.time"] != "right now!" 
{ - t.Fatal("fields.time not set to original time field") - } - - if entry["time"] != "0001-01-01T00:00:00Z" { - t.Fatal("time field not set to current time, was: ", entry["time"]) - } -} - -func TestFieldClashWithMsg(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("msg", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.msg"] != "something" { - t.Fatal("fields.msg not set to original msg field") - } -} - -func TestFieldClashWithLevel(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("level", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.level"] != "something" { - t.Fatal("fields.level not set to original level field") - } -} - -func TestJSONEntryEndsWithNewline(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("level", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - if b[len(b)-1] != '\n' { - t.Fatal("Expected JSON log entry to end with a newline") - } -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go deleted file mode 100644 index dd9975931..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go +++ /dev/null @@ -1,206 +0,0 @@ -package logrus - -import ( - "io" - "os" - "sync" -) - -type Logger struct { - // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a - // file, or leave it default which is `os.Stderr`. You can also set this to - // something more adventorous, such as logging to Kafka. - Out io.Writer - // Hooks for the logger instance. These allow firing events based on logging - // levels and log entries. For example, to send errors to an error tracking - // service, log to StatsD or dump the core on fatal errors. - Hooks LevelHooks - // All log entries pass through the formatter before logged to Out. The - // included formatters are `TextFormatter` and `JSONFormatter` for which - // TextFormatter is the default. In development (when a TTY is attached) it - // logs with colors, but to a file it wouldn't. You can easily implement your - // own that implements the `Formatter` interface, see the `README` or included - // formatters for examples. - Formatter Formatter - // The logging level the logger should log at. This is typically (and defaults - // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be - // logged. `logrus.Debug` is useful in - Level Level - // Used to sync writing to the log. - mu sync.Mutex -} - -// Creates a new logger. Configuration should be set by changing `Formatter`, -// `Out` and `Hooks` directly on the default logger instance. You can also just -// instantiate your own: -// -// var log = &Logger{ -// Out: os.Stderr, -// Formatter: new(JSONFormatter), -// Hooks: make(LevelHooks), -// Level: logrus.DebugLevel, -// } -// -// It's recommended to make this a global instance called `log`. 
-func New() *Logger { - return &Logger{ - Out: os.Stderr, - Formatter: new(TextFormatter), - Hooks: make(LevelHooks), - Level: InfoLevel, - } -} - -// Adds a field to the log entry, note that it doesn't log until you call -// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. -// If you want multiple fields, use `WithFields`. -func (logger *Logger) WithField(key string, value interface{}) *Entry { - return NewEntry(logger).WithField(key, value) -} - -// Adds a map of fields to the log entry. All it does is call `WithField` for -// each `Field`. -func (logger *Logger) WithFields(fields Fields) *Entry { - return NewEntry(logger).WithFields(fields) -} - -func (logger *Logger) Debugf(format string, args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debugf(format, args...) - } -} - -func (logger *Logger) Infof(format string, args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Infof(format, args...) - } -} - -func (logger *Logger) Printf(format string, args ...interface{}) { - NewEntry(logger).Printf(format, args...) -} - -func (logger *Logger) Warnf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnf(format, args...) - } -} - -func (logger *Logger) Warningf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnf(format, args...) - } -} - -func (logger *Logger) Errorf(format string, args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorf(format, args...) - } -} - -func (logger *Logger) Fatalf(format string, args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalf(format, args...) - } - os.Exit(1) -} - -func (logger *Logger) Panicf(format string, args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicf(format, args...) - } -} - -func (logger *Logger) Debug(args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debug(args...) - } -} - -func (logger *Logger) Info(args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Info(args...) - } -} - -func (logger *Logger) Print(args ...interface{}) { - NewEntry(logger).Info(args...) -} - -func (logger *Logger) Warn(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warn(args...) - } -} - -func (logger *Logger) Warning(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warn(args...) - } -} - -func (logger *Logger) Error(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Error(args...) - } -} - -func (logger *Logger) Fatal(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatal(args...) - } - os.Exit(1) -} - -func (logger *Logger) Panic(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panic(args...) - } -} - -func (logger *Logger) Debugln(args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debugln(args...) - } -} - -func (logger *Logger) Infoln(args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Infoln(args...) - } -} - -func (logger *Logger) Println(args ...interface{}) { - NewEntry(logger).Println(args...) -} - -func (logger *Logger) Warnln(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnln(args...) - } -} - -func (logger *Logger) Warningln(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnln(args...)
- } -} - -func (logger *Logger) Errorln(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorln(args...) - } -} - -func (logger *Logger) Fatalln(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalln(args...) - } - os.Exit(1) -} - -func (logger *Logger) Panicln(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicln(args...) - } -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go deleted file mode 100644 index 43ee12e90..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go +++ /dev/null @@ -1,94 +0,0 @@ -package logrus - -import ( - "fmt" - "log" -) - -// Fields type, used to pass to `WithFields`. -type Fields map[string]interface{} - -// Level type -type Level uint8 - -// Convert the Level to a string. E.g. PanicLevel becomes "panic". -func (level Level) String() string { - switch level { - case DebugLevel: - return "debug" - case InfoLevel: - return "info" - case WarnLevel: - return "warning" - case ErrorLevel: - return "error" - case FatalLevel: - return "fatal" - case PanicLevel: - return "panic" - } - - return "unknown" -} - -// ParseLevel takes a string level and returns the Logrus log level constant. -func ParseLevel(lvl string) (Level, error) { - switch lvl { - case "panic": - return PanicLevel, nil - case "fatal": - return FatalLevel, nil - case "error": - return ErrorLevel, nil - case "warn", "warning": - return WarnLevel, nil - case "info": - return InfoLevel, nil - case "debug": - return DebugLevel, nil - } - - var l Level - return l, fmt.Errorf("not a valid logrus Level: %q", lvl) -} - -// These are the different logging levels. You can set the logging level to log -// on your instance of logger, obtained with `logrus.New()`. -const ( - // PanicLevel level, highest level of severity. Logs and then calls panic with the - // message passed to Debug, Info, ... - PanicLevel Level = iota - // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the - // logging level is set to Panic. - FatalLevel - // ErrorLevel level. Logs. Used for errors that should definitely be noted. - // Commonly used for hooks to send errors to an error tracking service. - ErrorLevel - // WarnLevel level. Non-critical entries that deserve eyes. - WarnLevel - // InfoLevel level. General operational entries about what's going on inside the - // application. - InfoLevel - // DebugLevel level. Usually only enabled when debugging. Very verbose logging. - DebugLevel -) - -// Won't compile if StdLogger can't be realized by a log.Logger -var _ StdLogger = &log.Logger{} - -// StdLogger is what your logrus-enabled library should take, that way -// it'll accept a stdlib logger and a logrus logger. There's no standard -// interface, this is the closest we get, unfortunately. 
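Because *Logger implements every method of the interface defined just below (see the Print, Fatal and Panic families above), a library that accepts a StdLogger works with either the standard library's logger or a logrus one. An illustrative sketch; `startWorker` is hypothetical, not part of logrus:

```go
package main

import (
	"log"
	"os"

	"github.com/Sirupsen/logrus"
)

// startWorker is a hypothetical library entry point; it only assumes the
// lowest-common-denominator StdLogger interface.
func startWorker(l logrus.StdLogger) {
	l.Printf("worker starting")
}

func main() {
	startWorker(log.New(os.Stderr, "worker ", log.LstdFlags)) // stdlib *log.Logger
	startWorker(logrus.New())                                 // *logrus.Logger satisfies it too
}
```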
-type StdLogger interface { - Print(...interface{}) - Printf(string, ...interface{}) - Println(...interface{}) - - Fatal(...interface{}) - Fatalf(string, ...interface{}) - Fatalln(...interface{}) - - Panic(...interface{}) - Panicf(string, ...interface{}) - Panicln(...interface{}) -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go deleted file mode 100644 index efaacea23..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go +++ /dev/null @@ -1,301 +0,0 @@ -package logrus - -import ( - "bytes" - "encoding/json" - "strconv" - "strings" - "sync" - "testing" - - "github.com/stretchr/testify/assert" -) - -func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - log(logger) - - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - assertions(fields) -} - -func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) { - var buffer bytes.Buffer - - logger := New() - logger.Out = &buffer - logger.Formatter = &TextFormatter{ - DisableColors: true, - } - - log(logger) - - fields := make(map[string]string) - for _, kv := range strings.Split(buffer.String(), " ") { - if !strings.Contains(kv, "=") { - continue - } - kvArr := strings.Split(kv, "=") - key := strings.TrimSpace(kvArr[0]) - val := kvArr[1] - if kvArr[1][0] == '"' { - var err error - val, err = strconv.Unquote(val) - assert.NoError(t, err) - } - fields[key] = val - } - assertions(fields) -} - -func TestPrint(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "info") - }) -} - -func TestInfo(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "info") - }) -} - -func TestWarn(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Warn("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "warning") - }) -} - -func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln("test", "test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test test") - }) -} - -func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln("test", 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test 10") - }) -} - -func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln(10, 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "10 10") - }) -} - -func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln(10, 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "10 10") - }) -} - -func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test", 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test10") - }) -} - -func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test", "test") - 
}, func(fields Fields) { - assert.Equal(t, fields["msg"], "testtest") - }) -} - -func TestWithFieldsShouldAllowAssignments(t *testing.T) { - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - localLog := logger.WithFields(Fields{ - "key1": "value1", - }) - - localLog.WithField("key2", "value2").Info("test") - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - assert.Equal(t, "value2", fields["key2"]) - assert.Equal(t, "value1", fields["key1"]) - - buffer = bytes.Buffer{} - fields = Fields{} - localLog.Info("test") - err = json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - _, ok := fields["key2"] - assert.Equal(t, false, ok) - assert.Equal(t, "value1", fields["key1"]) -} - -func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("msg", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - }) -} - -func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("msg", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["fields.msg"], "hello") - }) -} - -func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("time", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["fields.time"], "hello") - }) -} - -func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("level", 1).Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["level"], "info") - assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only - }) -} - -func TestDefaultFieldsAreNotPrefixed(t *testing.T) { - LogAndAssertText(t, func(log *Logger) { - ll := log.WithField("herp", "derp") - ll.Info("hello") - ll.Info("bye") - }, func(fields map[string]string) { - for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} { - if _, ok := fields[fieldName]; ok { - t.Fatalf("should not have prefixed %q: %v", fieldName, fields) - } - } - }) -} - -func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { - - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - llog := logger.WithField("context", "eating raw fish") - - llog.Info("looks delicious") - - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.NoError(t, err, "should have decoded first message") - assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") - assert.Equal(t, fields["msg"], "looks delicious") - assert.Equal(t, fields["context"], "eating raw fish") - - buffer.Reset() - - llog.Warn("omg it is!") - - err = json.Unmarshal(buffer.Bytes(), &fields) - assert.NoError(t, err, "should have decoded second message") - assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") - assert.Equal(t, fields["msg"], "omg it is!") - assert.Equal(t, fields["context"], "eating raw fish") - assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry") - -} - -func TestConvertLevelToString(t *testing.T) { - assert.Equal(t, "debug", DebugLevel.String()) - assert.Equal(t, "info", InfoLevel.String()) - assert.Equal(t, "warning", WarnLevel.String()) - assert.Equal(t, "error", ErrorLevel.String()) - assert.Equal(t, "fatal", 
FatalLevel.String()) - assert.Equal(t, "panic", PanicLevel.String()) -} - -func TestParseLevel(t *testing.T) { - l, err := ParseLevel("panic") - assert.Nil(t, err) - assert.Equal(t, PanicLevel, l) - - l, err = ParseLevel("fatal") - assert.Nil(t, err) - assert.Equal(t, FatalLevel, l) - - l, err = ParseLevel("error") - assert.Nil(t, err) - assert.Equal(t, ErrorLevel, l) - - l, err = ParseLevel("warn") - assert.Nil(t, err) - assert.Equal(t, WarnLevel, l) - - l, err = ParseLevel("warning") - assert.Nil(t, err) - assert.Equal(t, WarnLevel, l) - - l, err = ParseLevel("info") - assert.Nil(t, err) - assert.Equal(t, InfoLevel, l) - - l, err = ParseLevel("debug") - assert.Nil(t, err) - assert.Equal(t, DebugLevel, l) - - l, err = ParseLevel("invalid") - assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) -} - -func TestGetSetLevelRace(t *testing.T) { - wg := sync.WaitGroup{} - for i := 0; i < 100; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - if i%2 == 0 { - SetLevel(InfoLevel) - } else { - GetLevel() - } - }(i) - - } - wg.Wait() -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go deleted file mode 100644 index 71f8d67a5..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build darwin freebsd openbsd netbsd dragonfly - -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go deleted file mode 100644 index a2c0b40db..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go +++ /dev/null @@ -1,12 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TCGETS - -type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go deleted file mode 100644 index 4bb537602..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go +++ /dev/null @@ -1,21 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux darwin freebsd openbsd netbsd dragonfly - -package logrus - -import ( - "syscall" - "unsafe" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stdout - var termios Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go deleted file mode 100644 index 2e09f6f7e..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build windows - -package logrus - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stdout - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go deleted file mode 100644 index 17cc29848..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go +++ /dev/null @@ -1,159 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "runtime" - "sort" - "strings" - "time" -) - -const ( - nocolor = 0 - red = 31 - green = 32 - yellow = 33 - blue = 34 - gray = 37 -) - -var ( - baseTimestamp time.Time - isTerminal bool -) - -func init() { - baseTimestamp = time.Now() - isTerminal = IsTerminal() -} - -func miniTS() int { - return int(time.Since(baseTimestamp) / time.Second) -} - -type TextFormatter struct { - // Set to true to bypass checking for a TTY before outputting colors. - ForceColors bool - - // Force disabling colors. - DisableColors bool - - // Disable timestamp logging. useful when output is redirected to logging - // system that already adds timestamps. - DisableTimestamp bool - - // Enable logging the full timestamp when a TTY is attached instead of just - // the time passed since beginning of execution. - FullTimestamp bool - - // TimestampFormat to use for display when a full timestamp is printed - TimestampFormat string - - // The fields are sorted by default for a consistent output. For applications - // that log extremely frequently and don't use the JSON formatter this may not - // be desired. 
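- // For example: &TextFormatter{FullTimestamp: true, DisableColors: true, DisableSorting: true} (illustrative).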
- DisableSorting bool -} - -func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - keys := make([]string, 0, len(entry.Data)) - for k := range entry.Data { - keys = append(keys, k) - } - - if !f.DisableSorting { - sort.Strings(keys) - } - - b := &bytes.Buffer{} - - prefixFieldClashes(entry.Data) - - isColorTerminal := isTerminal && (runtime.GOOS != "windows") - isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat - } - if isColored { - f.printColored(b, entry, keys, timestampFormat) - } else { - if !f.DisableTimestamp { - f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) - } - f.appendKeyValue(b, "level", entry.Level.String()) - f.appendKeyValue(b, "msg", entry.Message) - for _, key := range keys { - f.appendKeyValue(b, key, entry.Data[key]) - } - } - - b.WriteByte('\n') - return b.Bytes(), nil -} - -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { - var levelColor int - switch entry.Level { - case DebugLevel: - levelColor = gray - case WarnLevel: - levelColor = yellow - case ErrorLevel, FatalLevel, PanicLevel: - levelColor = red - default: - levelColor = blue - } - - levelText := strings.ToUpper(entry.Level.String())[0:4] - - if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) - } else { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) - } - for _, k := range keys { - v := entry.Data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v) - } -} - -// needsQuoting reports whether the value contains characters outside the -// safe set [a-zA-Z0-9.-] and therefore has to be quoted. -func needsQuoting(text string) bool { - for _, ch := range text { - if !((ch >= 'a' && ch <= 'z') || - (ch >= 'A' && ch <= 'Z') || - (ch >= '0' && ch <= '9') || - ch == '-' || ch == '.') { - return true - } - } - return false -} - -func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { - b.WriteString(key) - b.WriteByte('=') - - switch value := value.(type) { - case string: - if !needsQuoting(value) { - b.WriteString(value) - } else { - fmt.Fprintf(b, "%q", value) - } - case error: - errmsg := value.Error() - if !needsQuoting(errmsg) { - b.WriteString(errmsg) - } else { - fmt.Fprintf(b, "%q", value) - } - default: - fmt.Fprint(b, value) - } - - b.WriteByte(' ') -} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go deleted file mode 100644 index e25a44f67..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package logrus - -import ( - "bytes" - "errors" - "testing" - "time" -) - -func TestQuoting(t *testing.T) { - tf := &TextFormatter{DisableColors: true} - - checkQuoting := func(q bool, value interface{}) { - b, _ := tf.Format(WithField("test", value)) - idx := bytes.Index(b, ([]byte)("test=")) - cont := bytes.Contains(b[idx+5:], []byte{'"'}) - if cont != q { - if q { - t.Errorf("quoting expected for: %#v", value) - } else { - t.Errorf("quoting not expected for: %#v", value) - } - } - } - - checkQuoting(false, "abcd") - checkQuoting(false, "v1.0") - checkQuoting(false, "1234567890") - checkQuoting(true, "/foobar") - checkQuoting(true, "x y") - checkQuoting(true, "x,y") - checkQuoting(false, errors.New("invalid")) - checkQuoting(true, errors.New("invalid argument")) -} -
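The quoting behaviour exercised above comes from needsQuoting in text_formatter.go: values containing any character outside [a-zA-Z0-9.-] are rendered with %q. A small sketch in the style of the surrounding tests, pinning one concrete case:

```go
package logrus

import (
	"bytes"
	"testing"
)

// Sketch only: a value containing '/' falls outside the safe set and is quoted.
func TestQuotingOfSlashValues(t *testing.T) {
	tf := &TextFormatter{DisableColors: true}
	b, _ := tf.Format(WithField("path", "/var/log"))
	if !bytes.Contains(b, []byte(`path="/var/log"`)) {
		t.Errorf("expected slash-containing value to be quoted, got %q", b)
	}
}
```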
-func TestTimestampFormat(t *testing.T) { - checkTimeStr := func(format string) { - customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format} - customStr, _ := customFormatter.Format(WithField("test", "test")) - timeStart := bytes.Index(customStr, ([]byte)("time=")) - timeEnd := bytes.Index(customStr, ([]byte)("level=")) - timeStr := customStr[timeStart+5 : timeEnd-1] - if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' { - timeStr = timeStr[1 : len(timeStr)-1] - } - if format == "" { - format = time.RFC3339 - } - _, e := time.Parse(format, (string)(timeStr)) - if e != nil { - t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e) - } - } - - checkTimeStr("2006-01-02T15:04:05.000000000Z07:00") - checkTimeStr("Mon Jan _2 15:04:05 2006") - checkTimeStr("") -} - -// TODO add tests for sorting etc., this requires a parser for the text -// formatter output. diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go deleted file mode 100644 index 1e30b1c75..000000000 --- a/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go +++ /dev/null @@ -1,31 +0,0 @@ -package logrus - -import ( - "bufio" - "io" - "runtime" -) - -func (logger *Logger) Writer() *io.PipeWriter { - reader, writer := io.Pipe() - - go logger.writerScanner(reader) - runtime.SetFinalizer(writer, writerFinalizer) - - return writer -} - -func (logger *Logger) writerScanner(reader *io.PipeReader) { - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - logger.Print(scanner.Text()) - } - if err := scanner.Err(); err != nil { - logger.Errorf("Error while reading from Writer: %s", err) - } - reader.Close() -} - -func writerFinalizer(writer *io.PipeWriter) { - writer.Close() -} diff --git a/Godeps/_workspace/src/github.com/amir/raidman/README.md b/Godeps/_workspace/src/github.com/amir/raidman/README.md deleted file mode 100644 index 5dbe8684e..000000000 --- a/Godeps/_workspace/src/github.com/amir/raidman/README.md +++ /dev/null @@ -1,73 +0,0 @@ -Raidman -======= - -Go Riemann client - -```go -package main - -import ( - "github.com/amir/raidman" -) - -func main() { - c, err := raidman.Dial("tcp", "localhost:5555") - if err != nil { - panic(err) - } - - var event = &raidman.Event{ - State: "success", - Host: "raidman", - Service: "raidman-sample", - Metric: 100, - Ttl: 10, - } - - // send one event - err = c.Send(event) - if err != nil { - panic(err) - } - - // send multiple events at once - err = c.SendMulti([]*raidman.Event{ - &raidman.Event{ - State: "success", - Host: "raidman", - Service: "raidman-sample", - Metric: 100, - Ttl: 10, - }, - &raidman.Event{ - State: "failure", - Host: "raidman", - Service: "raidman-sample", - Metric: 100, - Ttl: 10, - }, - &raidman.Event{ - State: "success", - Host: "raidman", - Service: "raidman-sample", - Metric: 100, - Ttl: 10, - }, - }) - if err != nil { - panic(err) - } - - events, err := c.Query("host = \"raidman\"") - if err != nil { - panic(err) - } - - if len(events) < 1 { - panic("Submitted event not found") - } - - c.Close() -} - -``` diff --git a/Godeps/_workspace/src/github.com/amir/raidman/UNLICENSE b/Godeps/_workspace/src/github.com/amir/raidman/UNLICENSE deleted file mode 100644 index 68a49daad..000000000 --- a/Godeps/_workspace/src/github.com/amir/raidman/UNLICENSE +++ /dev/null @@ -1,24 +0,0 @@ -This is free and unencumbered software released into the public domain. 
- -Anyone is free to copy, modify, publish, use, compile, sell, or -distribute this software, either in source code form or as a compiled -binary, for any purpose, commercial or non-commercial, and by any -means. - -In jurisdictions that recognize copyright laws, the author or authors -of this software dedicate any and all copyright interest in the -software to the public domain. We make this dedication for the benefit -of the public at large and to the detriment of our heirs and -successors. We intend this dedication to be an overt act of -relinquishment in perpetuity of all present and future rights to this -software under copyright law. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR -OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - -For more information, please refer to diff --git a/Godeps/_workspace/src/github.com/amir/raidman/proto/Makefile b/Godeps/_workspace/src/github.com/amir/raidman/proto/Makefile deleted file mode 100644 index 5349cc839..000000000 --- a/Godeps/_workspace/src/github.com/amir/raidman/proto/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -proto.pb.go: proto.proto - mkdir -p _pb - protoc --go_out=_pb $< - cat _pb/$@\ - |gofmt >$@ - rm -rf _pb diff --git a/Godeps/_workspace/src/github.com/amir/raidman/proto/proto.pb.go b/Godeps/_workspace/src/github.com/amir/raidman/proto/proto.pb.go deleted file mode 100644 index 42eea684d..000000000 --- a/Godeps/_workspace/src/github.com/amir/raidman/proto/proto.pb.go +++ /dev/null @@ -1,273 +0,0 @@ -// Code generated by protoc-gen-go. -// source: proto.proto -// DO NOT EDIT! - -package proto - -import proto1 "github.com/golang/protobuf/proto" -import json "encoding/json" -import math "math" - -// Reference proto, json, and math imports to suppress error if they are not otherwise used. 
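The generated accessors that follow guard against nil receivers and unset optional fields, which is why client code can call them without nil checks. A small illustration (sketch only; `describe` is a hypothetical helper):

```go
// Safe even when s is nil: each generated getter checks the receiver and
// the field pointer before dereferencing, returning the zero value instead.
func describe(s *State) (int64, string) {
	return s.GetTime(), s.GetState()
}
```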
-var _ = proto1.Marshal -var _ = &json.SyntaxError{} -var _ = math.Inf - -type State struct { - Time *int64 `protobuf:"varint,1,opt,name=time" json:"time,omitempty"` - State *string `protobuf:"bytes,2,opt,name=state" json:"state,omitempty"` - Service *string `protobuf:"bytes,3,opt,name=service" json:"service,omitempty"` - Host *string `protobuf:"bytes,4,opt,name=host" json:"host,omitempty"` - Description *string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` - Once *bool `protobuf:"varint,6,opt,name=once" json:"once,omitempty"` - Tags []string `protobuf:"bytes,7,rep,name=tags" json:"tags,omitempty"` - Ttl *float32 `protobuf:"fixed32,8,opt,name=ttl" json:"ttl,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *State) Reset() { *this = State{} } -func (this *State) String() string { return proto1.CompactTextString(this) } -func (*State) ProtoMessage() {} - -func (this *State) GetTime() int64 { - if this != nil && this.Time != nil { - return *this.Time - } - return 0 -} - -func (this *State) GetState() string { - if this != nil && this.State != nil { - return *this.State - } - return "" -} - -func (this *State) GetService() string { - if this != nil && this.Service != nil { - return *this.Service - } - return "" -} - -func (this *State) GetHost() string { - if this != nil && this.Host != nil { - return *this.Host - } - return "" -} - -func (this *State) GetDescription() string { - if this != nil && this.Description != nil { - return *this.Description - } - return "" -} - -func (this *State) GetOnce() bool { - if this != nil && this.Once != nil { - return *this.Once - } - return false -} - -func (this *State) GetTags() []string { - if this != nil { - return this.Tags - } - return nil -} - -func (this *State) GetTtl() float32 { - if this != nil && this.Ttl != nil { - return *this.Ttl - } - return 0 -} - -type Event struct { - Time *int64 `protobuf:"varint,1,opt,name=time" json:"time,omitempty"` - State *string `protobuf:"bytes,2,opt,name=state" json:"state,omitempty"` - Service *string `protobuf:"bytes,3,opt,name=service" json:"service,omitempty"` - Host *string `protobuf:"bytes,4,opt,name=host" json:"host,omitempty"` - Description *string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` - Tags []string `protobuf:"bytes,7,rep,name=tags" json:"tags,omitempty"` - Ttl *float32 `protobuf:"fixed32,8,opt,name=ttl" json:"ttl,omitempty"` - Attributes []*Attribute `protobuf:"bytes,9,rep,name=attributes" json:"attributes,omitempty"` - MetricSint64 *int64 `protobuf:"zigzag64,13,opt,name=metric_sint64" json:"metric_sint64,omitempty"` - MetricD *float64 `protobuf:"fixed64,14,opt,name=metric_d" json:"metric_d,omitempty"` - MetricF *float32 `protobuf:"fixed32,15,opt,name=metric_f" json:"metric_f,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *Event) Reset() { *this = Event{} } -func (this *Event) String() string { return proto1.CompactTextString(this) } -func (*Event) ProtoMessage() {} - -func (this *Event) GetTime() int64 { - if this != nil && this.Time != nil { - return *this.Time - } - return 0 -} - -func (this *Event) GetState() string { - if this != nil && this.State != nil { - return *this.State - } - return "" -} - -func (this *Event) GetService() string { - if this != nil && this.Service != nil { - return *this.Service - } - return "" -} - -func (this *Event) GetHost() string { - if this != nil && this.Host != nil { - return *this.Host - } - return "" -} - -func (this *Event) GetDescription() string { - if this != nil && 
this.Description != nil { - return *this.Description - } - return "" -} - -func (this *Event) GetTags() []string { - if this != nil { - return this.Tags - } - return nil -} - -func (this *Event) GetTtl() float32 { - if this != nil && this.Ttl != nil { - return *this.Ttl - } - return 0 -} - -func (this *Event) GetAttributes() []*Attribute { - if this != nil { - return this.Attributes - } - return nil -} - -func (this *Event) GetMetricSint64() int64 { - if this != nil && this.MetricSint64 != nil { - return *this.MetricSint64 - } - return 0 -} - -func (this *Event) GetMetricD() float64 { - if this != nil && this.MetricD != nil { - return *this.MetricD - } - return 0 -} - -func (this *Event) GetMetricF() float32 { - if this != nil && this.MetricF != nil { - return *this.MetricF - } - return 0 -} - -type Query struct { - String_ *string `protobuf:"bytes,1,opt,name=string" json:"string,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *Query) Reset() { *this = Query{} } -func (this *Query) String() string { return proto1.CompactTextString(this) } -func (*Query) ProtoMessage() {} - -func (this *Query) GetString_() string { - if this != nil && this.String_ != nil { - return *this.String_ - } - return "" -} - -type Msg struct { - Ok *bool `protobuf:"varint,2,opt,name=ok" json:"ok,omitempty"` - Error *string `protobuf:"bytes,3,opt,name=error" json:"error,omitempty"` - States []*State `protobuf:"bytes,4,rep,name=states" json:"states,omitempty"` - Query *Query `protobuf:"bytes,5,opt,name=query" json:"query,omitempty"` - Events []*Event `protobuf:"bytes,6,rep,name=events" json:"events,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *Msg) Reset() { *this = Msg{} } -func (this *Msg) String() string { return proto1.CompactTextString(this) } -func (*Msg) ProtoMessage() {} - -func (this *Msg) GetOk() bool { - if this != nil && this.Ok != nil { - return *this.Ok - } - return false -} - -func (this *Msg) GetError() string { - if this != nil && this.Error != nil { - return *this.Error - } - return "" -} - -func (this *Msg) GetStates() []*State { - if this != nil { - return this.States - } - return nil -} - -func (this *Msg) GetQuery() *Query { - if this != nil { - return this.Query - } - return nil -} - -func (this *Msg) GetEvents() []*Event { - if this != nil { - return this.Events - } - return nil -} - -type Attribute struct { - Key *string `protobuf:"bytes,1,req,name=key" json:"key,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *Attribute) Reset() { *this = Attribute{} } -func (this *Attribute) String() string { return proto1.CompactTextString(this) } -func (*Attribute) ProtoMessage() {} - -func (this *Attribute) GetKey() string { - if this != nil && this.Key != nil { - return *this.Key - } - return "" -} - -func (this *Attribute) GetValue() string { - if this != nil && this.Value != nil { - return *this.Value - } - return "" -} - -func init() { -} diff --git a/Godeps/_workspace/src/github.com/amir/raidman/proto/proto.proto b/Godeps/_workspace/src/github.com/amir/raidman/proto/proto.proto deleted file mode 100644 index 3e946a3a0..000000000 --- a/Godeps/_workspace/src/github.com/amir/raidman/proto/proto.proto +++ /dev/null @@ -1,45 +0,0 @@ -option java_package = "com.aphyr.riemann"; -option java_outer_classname = "Proto"; - -message State { - optional int64 time = 1; - optional string state = 2; - optional string service = 3; - optional string host = 4; - optional string description 
= 5; - optional bool once = 6; - repeated string tags = 7; - optional float ttl = 8; -} - -message Event { - optional int64 time = 1; - optional string state = 2; - optional string service = 3; - optional string host = 4; - optional string description = 5; - repeated string tags = 7; - optional float ttl = 8; - repeated Attribute attributes = 9; - - optional sint64 metric_sint64 = 13; - optional double metric_d = 14; - optional float metric_f = 15; -} - -message Query { - optional string string = 1; -} - -message Msg { - optional bool ok = 2; - optional string error = 3; - repeated State states = 4; - optional Query query = 5; - repeated Event events = 6; -} - -message Attribute { - required string key = 1; - optional string value = 2; -} diff --git a/Godeps/_workspace/src/github.com/amir/raidman/raidman.go b/Godeps/_workspace/src/github.com/amir/raidman/raidman.go deleted file mode 100644 index 87c82777c..000000000 --- a/Godeps/_workspace/src/github.com/amir/raidman/raidman.go +++ /dev/null @@ -1,313 +0,0 @@ -// Go Riemann client -package raidman - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "net" - "os" - "reflect" - "sync" - "time" - - "github.com/amir/raidman/proto" - pb "github.com/golang/protobuf/proto" -) - -type network interface { - Send(message *proto.Msg, conn net.Conn) (*proto.Msg, error) -} - -type tcp struct{} - -type udp struct{} - -// Client represents a connection to a Riemann server -type Client struct { - sync.Mutex - net network - connection net.Conn - timeout time.Duration -} - -// An Event represents a single Riemann event -type Event struct { - Ttl float32 - Time int64 - Tags []string - Host string // Defaults to os.Hostname() - State string - Service string - Metric interface{} // Could be Int, Float32, Float64 - Description string - Attributes map[string]string -} - -// DialWithTimeout establishes a connection to a Riemann server at addr, on the -// network netwrk, with the given timeout. - // -// Known networks are "tcp", "tcp4", "tcp6", "udp", "udp4", and "udp6". -func DialWithTimeout(netwrk, addr string, timeout time.Duration) (c *Client, err error) { - c = new(Client) - - var cnet network - switch netwrk { - case "tcp", "tcp4", "tcp6": - cnet = new(tcp) - case "udp", "udp4", "udp6": - cnet = new(udp) - default: - return nil, fmt.Errorf("dial %q: unsupported network %q", netwrk, netwrk) - } - - c.net = cnet - c.timeout = timeout - c.connection, err = net.Dial(netwrk, addr) - if err != nil { - return nil, err - } - - return c, nil -} - -// Dial establishes a connection to a Riemann server at addr, on the network -// netwrk. - // -// Known networks are "tcp", "tcp4", "tcp6", "udp", "udp4", and "udp6".
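Dial, defined just below, simply delegates to DialWithTimeout with a zero timeout; callers that want send deadlines (applied via SetDeadline in SendMulti further down) use DialWithTimeout directly. A hedged sketch of typical client usage, with placeholder address and event values:

```go
package main

import (
	"time"

	"github.com/amir/raidman"
)

func main() {
	// Five-second deadline on each send, enforced through SetDeadline in SendMulti.
	c, err := raidman.DialWithTimeout("tcp", "localhost:5555", 5*time.Second)
	if err != nil {
		panic(err)
	}
	defer c.Close()

	// Metric may be an int, float32 or float64; the client maps it to the
	// matching protobuf field.
	if err := c.Send(&raidman.Event{State: "success", Service: "ping", Metric: 1, Ttl: 10}); err != nil {
		panic(err)
	}
}
```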
-func Dial(netwrk, addr string) (c *Client, err error) { - return DialWithTimeout(netwrk, addr, 0) -} - -func (network *tcp) Send(message *proto.Msg, conn net.Conn) (*proto.Msg, error) { - msg := &proto.Msg{} - data, err := pb.Marshal(message) - if err != nil { - return msg, err - } - b := new(bytes.Buffer) - if err = binary.Write(b, binary.BigEndian, uint32(len(data))); err != nil { - return msg, err - } - if _, err = conn.Write(b.Bytes()); err != nil { - return msg, err - } - if _, err = conn.Write(data); err != nil { - return msg, err - } - var header uint32 - if err = binary.Read(conn, binary.BigEndian, &header); err != nil { - return msg, err - } - response := make([]byte, header) - if err = readFully(conn, response); err != nil { - return msg, err - } - if err = pb.Unmarshal(response, msg); err != nil { - return msg, err - } - if msg.GetOk() != true { - return msg, errors.New(msg.GetError()) - } - return msg, nil -} - -func readFully(r io.Reader, p []byte) error { - for len(p) > 0 { - n, err := r.Read(p) - p = p[n:] - if err != nil { - return err - } - } - return nil -} - -func (network *udp) Send(message *proto.Msg, conn net.Conn) (*proto.Msg, error) { - data, err := pb.Marshal(message) - if err != nil { - return nil, err - } - if _, err = conn.Write(data); err != nil { - return nil, err - } - - return nil, nil -} - -func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Map: - return v.IsNil() - case reflect.Slice: - zero := true - for i := 0; i < v.Len(); i++ { - zero = zero && isZero(v.Index(i)) - } - return zero - } - zero := reflect.Zero(v.Type()) - return v.Interface() == zero.Interface() -} - -func eventToPbEvent(event *Event) (*proto.Event, error) { - var e proto.Event - - if event.Host == "" { - event.Host, _ = os.Hostname() - } - t := reflect.ValueOf(&e).Elem() - s := reflect.ValueOf(event).Elem() - typeOfEvent := s.Type() - for i := 0; i < s.NumField(); i++ { - f := s.Field(i) - value := reflect.ValueOf(f.Interface()) - if !isZero(f) { - name := typeOfEvent.Field(i).Name - switch name { - case "State", "Service", "Host", "Description": - tmp := reflect.ValueOf(pb.String(value.String())) - t.FieldByName(name).Set(tmp) - case "Ttl": - tmp := reflect.ValueOf(pb.Float32(float32(value.Float()))) - t.FieldByName(name).Set(tmp) - case "Time": - tmp := reflect.ValueOf(pb.Int64(value.Int())) - t.FieldByName(name).Set(tmp) - case "Tags": - tmp := reflect.ValueOf(value.Interface().([]string)) - t.FieldByName(name).Set(tmp) - case "Metric": - switch reflect.TypeOf(f.Interface()).Kind() { - case reflect.Int: - tmp := reflect.ValueOf(pb.Int64(int64(value.Int()))) - t.FieldByName("MetricSint64").Set(tmp) - case reflect.Int64: - tmp := reflect.ValueOf(pb.Int64(int64(value.Int()))) - t.FieldByName("MetricSint64").Set(tmp) - case reflect.Float32: - tmp := reflect.ValueOf(pb.Float32(float32(value.Float()))) - t.FieldByName("MetricF").Set(tmp) - case reflect.Float64: - tmp := reflect.ValueOf(pb.Float64(value.Float())) - t.FieldByName("MetricD").Set(tmp) - default: - return nil, fmt.Errorf("Metric of invalid type (type %v)", - reflect.TypeOf(f.Interface()).Kind()) - } - case "Attributes": - var attrs []*proto.Attribute - for k, v := range value.Interface().(map[string]string) { - // Copy k,v so we can take - // pointers to the new - // temporaries - k_, v_ := k, v - attrs = append(attrs, &proto.Attribute{ - Key: &k_, - Value: &v_, - }) - } - t.FieldByName(name).Set(reflect.ValueOf(attrs)) - } - } - } - - return &e, nil -} - -func pbEventsToEvents(pbEvents []*proto.Event) 
[]Event { - var events []Event - - for _, event := range pbEvents { - e := Event{ - State: event.GetState(), - Service: event.GetService(), - Host: event.GetHost(), - Description: event.GetDescription(), - Ttl: event.GetTtl(), - Time: event.GetTime(), - Tags: event.GetTags(), - } - if event.MetricF != nil { - e.Metric = event.GetMetricF() - } else if event.MetricD != nil { - e.Metric = event.GetMetricD() - } else { - e.Metric = event.GetMetricSint64() - } - if event.Attributes != nil { - e.Attributes = make(map[string]string, len(event.GetAttributes())) - for _, attr := range event.GetAttributes() { - e.Attributes[attr.GetKey()] = attr.GetValue() - } - } - - events = append(events, e) - } - - return events -} - -// Send sends an event to Riemann -func (c *Client) Send(event *Event) error { - return c.SendMulti([]*Event{event}) -} - -// SendMulti sends multiple events to Riemann -func (c *Client) SendMulti(events []*Event) error { - message := &proto.Msg{} - - for _, event := range events { - e, err := eventToPbEvent(event) - if err != nil { - return err - } - - message.Events = append(message.Events, e) - } - - c.Lock() - defer c.Unlock() - - if c.timeout > 0 { - err := c.connection.SetDeadline(time.Now().Add(c.timeout)) - if err != nil { - return err - } - } - - _, err := c.net.Send(message, c.connection) - if err != nil { - return err - } - - return nil -} - -// Query returns a list of events matched by query -func (c *Client) Query(q string) ([]Event, error) { - switch c.net.(type) { - case *udp: - return nil, errors.New("Querying over UDP is not supported") - } - query := &proto.Query{} - query.String_ = pb.String(q) - message := &proto.Msg{} - message.Query = query - c.Lock() - defer c.Unlock() - response, err := c.net.Send(message, c.connection) - if err != nil { - return nil, err - } - return pbEventsToEvents(response.GetEvents()), nil -} - -// Close closes the connection to Riemann -func (c *Client) Close() { - c.Lock() - c.connection.Close() - c.Unlock() -} diff --git a/Godeps/_workspace/src/github.com/amir/raidman/raidman_test.go b/Godeps/_workspace/src/github.com/amir/raidman/raidman_test.go deleted file mode 100644 index 8a824ee3c..000000000 --- a/Godeps/_workspace/src/github.com/amir/raidman/raidman_test.go +++ /dev/null @@ -1,268 +0,0 @@ -package raidman - -import ( - "reflect" - "testing" -) - -func TestTCP(t *testing.T) { - c, err := Dial("tcp", "localhost:5555") - if err != nil { - t.Fatal(err.Error()) - } - var event = &Event{ - State: "success", - Host: "raidman", - Service: "tcp", - Metric: 42, - Ttl: 1, - Tags: []string{"tcp", "test", "raidman"}, - Attributes: map[string]string{"type": "test"}, - } - - err = c.Send(event) - if err != nil { - t.Error(err.Error()) - } - - events, err := c.Query("tagged \"test\"") - if err != nil { - t.Error(err.Error()) - } - - if len(events) < 1 { - t.Error("Submitted event not found") - } - - testAttributeExists := false - for _, event := range events { - if val, ok := event.Attributes["type"]; ok && val == "test" { - testAttributeExists = true - } - } - - if !testAttributeExists { - t.Error("Attribute \"type\" is missing") - } - - c.Close() -} - -func TestMultiTCP(t *testing.T) { - c, err := Dial("tcp", "localhost:5555") - if err != nil { - t.Fatal(err.Error()) - } - - err = c.SendMulti([]*Event{ - &Event{ - State: "success", - Host: "raidman", - Service: "tcp-multi-1", - Metric: 42, - Ttl: 1, - Tags: []string{"tcp", "test", "raidman", "multi"}, - Attributes: map[string]string{"type": "test"}, - }, - &Event{ - State: "success", - 
Host: "raidman", - Service: "tcp-multi-2", - Metric: 42, - Ttl: 1, - Tags: []string{"tcp", "test", "raidman", "multi"}, - Attributes: map[string]string{"type": "test"}, - }, - }) - if err != nil { - t.Error(err.Error()) - } - - events, err := c.Query("tagged \"test\" and tagged \"multi\"") - if err != nil { - t.Error(err.Error()) - } - - if len(events) != 2 { - t.Error("Submitted event not found") - } - - c.Close() -} - -func TestMetricIsInt64(t *testing.T) { - c, err := Dial("tcp", "localhost:5555") - if err != nil { - t.Fatal(err.Error()) - } - - var int64metric int64 = 9223372036854775807 - - var event = &Event{ - State: "success", - Host: "raidman", - Service: "tcp", - Metric: int64metric, - Ttl: 1, - Tags: []string{"tcp", "test", "raidman"}, - Attributes: map[string]string{"type": "test"}, - } - - err = c.Send(event) - if err != nil { - t.Error(err.Error()) - } -} - -func TestUDP(t *testing.T) { - c, err := Dial("udp", "localhost:5555") - if err != nil { - t.Fatal(err.Error()) - } - var event = &Event{ - State: "warning", - Host: "raidman", - Service: "udp", - Metric: 3.4, - Ttl: 10.7, - } - - err = c.Send(event) - if err != nil { - t.Error(err.Error()) - } - c.Close() -} - -func TestTCPWithoutHost(t *testing.T) { - c, err := Dial("tcp", "localhost:5555") - if err != nil { - t.Fatal(err.Error()) - } - defer c.Close() - - var event = &Event{ - State: "success", - Service: "tcp-host-not-set", - Ttl: 5, - } - - err = c.Send(event) - if err != nil { - t.Error(err.Error()) - } - - events, err := c.Query("service = \"tcp-host-not-set\"") - if err != nil { - t.Error(err.Error()) - } - - if len(events) < 1 { - t.Error("Submitted event not found") - } - - for _, e := range events { - if e.Host == "" { - t.Error("Default host name is not set") - } - } -} - -func TestIsZero(t *testing.T) { - event := &Event{ - Time: 1, - } - elem := reflect.ValueOf(event).Elem() - eventType := elem.Type() - for i := 0; i < elem.NumField(); i++ { - field := elem.Field(i) - name := eventType.Field(i).Name - if name == "Time" { - if isZero(field) { - t.Error("Time should not be zero") - } - } else { - if !isZero(field) { - t.Errorf("%s should be zero", name) - } - } - } -} - -func BenchmarkTCP(b *testing.B) { - c, err := Dial("tcp", "localhost:5555") - - var event = &Event{ - State: "good", - Host: "raidman", - Service: "benchmark", - } - - if err == nil { - for i := 0; i < b.N; i++ { - c.Send(event) - } - } - c.Close() -} - -func BenchmarkUDP(b *testing.B) { - c, err := Dial("udp", "localhost:5555") - - var event = &Event{ - State: "good", - Host: "raidman", - Service: "benchmark", - } - - if err == nil { - for i := 0; i < b.N; i++ { - c.Send(event) - } - } - c.Close() -} - -func BenchmarkConcurrentTCP(b *testing.B) { - c, err := Dial("tcp", "localhost:5555") - - var event = &Event{ - Host: "raidman", - Service: "tcp_concurrent", - Tags: []string{"concurrent", "tcp", "benchmark"}, - } - - ch := make(chan int, b.N) - for i := 0; i < b.N; i++ { - go func(metric int) { - event.Metric = metric - err = c.Send(event) - ch <- i - }(i) - } - <-ch - - c.Close() -} - -func BenchmarkConcurrentUDP(b *testing.B) { - c, err := Dial("udp", "localhost:5555") - - var event = &Event{ - Host: "raidman", - Service: "udp_concurrent", - Tags: []string{"concurrent", "udp", "benchmark"}, - } - - ch := make(chan int, b.N) - for i := 0; i < b.N; i++ { - go func(metric int) { - event.Metric = metric - err = c.Send(event) - ch <- i - }(i) - } - <-ch - - c.Close() -} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/.gitignore 
b/Godeps/_workspace/src/github.com/armon/go-metrics/.gitignore deleted file mode 100644 index 00268614f..000000000 --- a/Godeps/_workspace/src/github.com/armon/go-metrics/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/LICENSE b/Godeps/_workspace/src/github.com/armon/go-metrics/LICENSE deleted file mode 100644 index 106569e54..000000000 --- a/Godeps/_workspace/src/github.com/armon/go-metrics/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Armon Dadgar - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/README.md b/Godeps/_workspace/src/github.com/armon/go-metrics/README.md deleted file mode 100644 index 7b6f23e29..000000000 --- a/Godeps/_workspace/src/github.com/armon/go-metrics/README.md +++ /dev/null @@ -1,71 +0,0 @@ -go-metrics -========== - -This library provides a `metrics` package which can be used to instrument code, -expose application metrics, and profile runtime performance in a flexible manner. - -Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics) - -Sinks -===== - -The `metrics` package makes use of a `MetricSink` interface to support delivery -to any type of backend. Currently the following sinks are provided: - -* StatsiteSink : Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP) -* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP) -* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes) -* InmemSink : Provides in-memory aggregation, can be used to export stats -* FanoutSink : Sinks to multiple sinks. Enables writing to multiple statsite instances for example. -* BlackholeSink : Sinks to nowhere - -In addition to the sinks, the `InmemSignal` can be used to catch a signal, -and dump a formatted output of recent metrics. For example, when a process gets -a SIGUSR1, it can dump to stderr recent performance metrics for debugging. 
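Since each sink only has to satisfy the four-method `MetricSink` interface
(reproduced in `sink.go` later in this diff), wiring in a custom backend is
small. The sketch below is illustrative only (`LogSink` and `flat` are not
part of the vendored code); it simply writes every metric to the standard
logger:

    package main

    import (
        "log"
        "strings"

        "github.com/armon/go-metrics"
    )

    // LogSink is a hypothetical MetricSink that prints each metric.
    type LogSink struct{}

    func flat(key []string) string { return strings.Join(key, ".") }

    func (s *LogSink) SetGauge(key []string, val float32)    { log.Printf("[G] %s: %f", flat(key), val) }
    func (s *LogSink) EmitKey(key []string, val float32)     { log.Printf("[P] %s: %f", flat(key), val) }
    func (s *LogSink) IncrCounter(key []string, val float32) { log.Printf("[C] %s: %f", flat(key), val) }
    func (s *LogSink) AddSample(key []string, val float32)   { log.Printf("[S] %s: %f", flat(key), val) }

    func main() {
        // Install the custom sink globally, fanned out with a no-op sink
        // to show how FanoutSink combines multiple backends.
        metrics.NewGlobal(metrics.DefaultConfig("service-name"),
            metrics.FanoutSink{&LogSink{}, &metrics.BlackholeSink{}})
        metrics.IncrCounter([]string{"requests"}, 1)
    }

Anything that implements those four methods can be dropped into `NewGlobal`
or combined with the built-in sinks via `FanoutSink`.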
- -Examples -======== - -Here is an example of using the package: - - func SlowMethod() { - // Profiling the runtime of a method - defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now()) - } - - // Configure a statsite sink as the global metrics sink - sink, _ := metrics.NewStatsiteSink("statsite:8125") - metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink) - - // Emit a Key/Value pair - metrics.EmitKey([]string{"questions", "meaning of life"}, 42) - - -Here is an example of setting up a signal handler: - - // Setup the inmem sink and signal handler - inm := metrics.NewInmemSink(10*time.Second, time.Minute) - sig := metrics.DefaultInmemSignal(inm) - metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm) - - // Run some code - inm.SetGauge([]string{"foo"}, 42) - inm.EmitKey([]string{"bar"}, 30) - - inm.IncrCounter([]string{"baz"}, 42) - inm.IncrCounter([]string{"baz"}, 1) - inm.IncrCounter([]string{"baz"}, 80) - - inm.AddSample([]string{"method", "wow"}, 42) - inm.AddSample([]string{"method", "wow"}, 100) - inm.AddSample([]string{"method", "wow"}, 22) - - .... - -When a signal comes in, output like the following will be dumped to stderr: - - [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000 - [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000 - [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509 - [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513 - diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/const_unix.go b/Godeps/_workspace/src/github.com/armon/go-metrics/const_unix.go deleted file mode 100644 index 31098dd57..000000000 --- a/Godeps/_workspace/src/github.com/armon/go-metrics/const_unix.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !windows - -package metrics - -import ( - "syscall" -) - -const ( - // DefaultSignal is used with DefaultInmemSignal - DefaultSignal = syscall.SIGUSR1 -) diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/const_windows.go b/Godeps/_workspace/src/github.com/armon/go-metrics/const_windows.go deleted file mode 100644 index 38136af3e..000000000 --- a/Godeps/_workspace/src/github.com/armon/go-metrics/const_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build windows - -package metrics - -import ( - "syscall" -) - -const ( - // DefaultSignal is used with DefaultInmemSignal - // Windows has no SIGUSR1, use SIGBREAK - DefaultSignal = syscall.Signal(21) -) diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/inmem.go b/Godeps/_workspace/src/github.com/armon/go-metrics/inmem.go deleted file mode 100644 index da5032960..000000000 --- a/Godeps/_workspace/src/github.com/armon/go-metrics/inmem.go +++ /dev/null @@ -1,241 +0,0 @@ -package metrics - -import ( - "fmt" - "math" - "strings" - "sync" - "time" -) - -// InmemSink provides a MetricSink that does in-memory aggregation -// without sending metrics over a network. It can be embedded within -// an application to provide profiling information. -type InmemSink struct { - // How long is each aggregation interval - interval time.Duration - - // Retain controls how many metric intervals we keep - retain time.Duration - - // maxIntervals is the maximum length of intervals. - // It is retain / interval. 
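// For example, retain = 1m with interval = 10s yields maxIntervals = 6.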
- maxIntervals int - - // intervals is a slice of the retained intervals - intervals []*IntervalMetrics - intervalLock sync.RWMutex -} - -// IntervalMetrics stores the aggregated metrics -// for a specific interval -type IntervalMetrics struct { - sync.RWMutex - - // The start time of the interval - Interval time.Time - - // Gauges maps the key to the last set value - Gauges map[string]float32 - - // Points maps the string to the list of emitted values - // from EmitKey - Points map[string][]float32 - - // Counters maps the string key to a sum of the counter - // values - Counters map[string]*AggregateSample - - // Samples maps the key to an AggregateSample, - // which has the rolled up view of a sample - Samples map[string]*AggregateSample -} - -// NewIntervalMetrics creates a new IntervalMetrics for a given interval -func NewIntervalMetrics(intv time.Time) *IntervalMetrics { - return &IntervalMetrics{ - Interval: intv, - Gauges: make(map[string]float32), - Points: make(map[string][]float32), - Counters: make(map[string]*AggregateSample), - Samples: make(map[string]*AggregateSample), - } -} - -// AggregateSample is used to hold aggregate metrics -// about a sample -type AggregateSample struct { - Count int // The count of emitted pairs - Sum float64 // The sum of values - SumSq float64 // The sum of squared values - Min float64 // Minimum value - Max float64 // Maximum value - LastUpdated time.Time // When value was last updated -} - -// Computes a Stddev of the values -func (a *AggregateSample) Stddev() float64 { - num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2) - div := float64(a.Count * (a.Count - 1)) - if div == 0 { - return 0 - } - return math.Sqrt(num / div) -} - -// Computes a mean of the values -func (a *AggregateSample) Mean() float64 { - if a.Count == 0 { - return 0 - } - return a.Sum / float64(a.Count) -} - -// Ingest is used to update a sample -func (a *AggregateSample) Ingest(v float64) { - a.Count++ - a.Sum += v - a.SumSq += (v * v) - if v < a.Min || a.Count == 1 { - a.Min = v - } - if v > a.Max || a.Count == 1 { - a.Max = v - } - a.LastUpdated = time.Now() -} - -func (a *AggregateSample) String() string { - if a.Count == 0 { - return "Count: 0" - } else if a.Stddev() == 0 { - return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated) - } else { - return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s", - a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated) - } -} - -// NewInmemSink is used to construct a new in-memory sink. -// Uses an aggregation interval and maximum retention period. 
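For reference, `AggregateSample.Stddev` above is the single-pass form of the Bessel-corrected sample standard deviation, computed from the running `Count` (n), `Sum`, and `SumSq` without retaining individual values:

    s = \sqrt{ \frac{n \sum x_i^2 - (\sum x_i)^2}{n(n - 1)} }

The `div == 0` guard returns 0 when n < 2, where the statistic is undefined.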
-func NewInmemSink(interval, retain time.Duration) *InmemSink { - i := &InmemSink{ - interval: interval, - retain: retain, - maxIntervals: int(retain / interval), - } - i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals) - return i -} - -func (i *InmemSink) SetGauge(key []string, val float32) { - k := i.flattenKey(key) - intv := i.getInterval() - - intv.Lock() - defer intv.Unlock() - intv.Gauges[k] = val -} - -func (i *InmemSink) EmitKey(key []string, val float32) { - k := i.flattenKey(key) - intv := i.getInterval() - - intv.Lock() - defer intv.Unlock() - vals := intv.Points[k] - intv.Points[k] = append(vals, val) -} - -func (i *InmemSink) IncrCounter(key []string, val float32) { - k := i.flattenKey(key) - intv := i.getInterval() - - intv.Lock() - defer intv.Unlock() - - agg := intv.Counters[k] - if agg == nil { - agg = &AggregateSample{} - intv.Counters[k] = agg - } - agg.Ingest(float64(val)) -} - -func (i *InmemSink) AddSample(key []string, val float32) { - k := i.flattenKey(key) - intv := i.getInterval() - - intv.Lock() - defer intv.Unlock() - - agg := intv.Samples[k] - if agg == nil { - agg = &AggregateSample{} - intv.Samples[k] = agg - } - agg.Ingest(float64(val)) -} - -// Data is used to retrieve all the aggregated metrics -// Intervals may be in use, and a read lock should be acquired -func (i *InmemSink) Data() []*IntervalMetrics { - // Get the current interval, forces creation - i.getInterval() - - i.intervalLock.RLock() - defer i.intervalLock.RUnlock() - - intervals := make([]*IntervalMetrics, len(i.intervals)) - copy(intervals, i.intervals) - return intervals -} - -func (i *InmemSink) getExistingInterval(intv time.Time) *IntervalMetrics { - i.intervalLock.RLock() - defer i.intervalLock.RUnlock() - - n := len(i.intervals) - if n > 0 && i.intervals[n-1].Interval == intv { - return i.intervals[n-1] - } - return nil -} - -func (i *InmemSink) createInterval(intv time.Time) *IntervalMetrics { - i.intervalLock.Lock() - defer i.intervalLock.Unlock() - - // Check for an existing interval - n := len(i.intervals) - if n > 0 && i.intervals[n-1].Interval == intv { - return i.intervals[n-1] - } - - // Add the current interval - current := NewIntervalMetrics(intv) - i.intervals = append(i.intervals, current) - n++ - - // Truncate the intervals if they are too long - if n >= i.maxIntervals { - copy(i.intervals[0:], i.intervals[n-i.maxIntervals:]) - i.intervals = i.intervals[:i.maxIntervals] - } - return current -} - -// getInterval returns the current interval to write to -func (i *InmemSink) getInterval() *IntervalMetrics { - intv := time.Now().Truncate(i.interval) - if m := i.getExistingInterval(intv); m != nil { - return m - } - return i.createInterval(intv) -} - -// Flattens the key for formatting, removes spaces -func (i *InmemSink) flattenKey(parts []string) string { - joined := strings.Join(parts, ".") - return strings.Replace(joined, " ", "_", -1) -} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal.go b/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal.go deleted file mode 100644 index 95d08ee10..000000000 --- a/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal.go +++ /dev/null @@ -1,100 +0,0 @@ -package metrics - -import ( - "bytes" - "fmt" - "io" - "os" - "os/signal" - "sync" - "syscall" -) - -// InmemSignal is used to listen for a given signal, and when received, -// to dump the current metrics from the InmemSink to an io.Writer -type InmemSignal struct { - signal syscall.Signal - inm *InmemSink - w io.Writer - sigCh chan 
os.Signal - - stop bool - stopCh chan struct{} - stopLock sync.Mutex -} - -// NewInmemSignal creates a new InmemSignal which listens for a given signal, -// and dumps the current metrics out to a writer -func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal { - i := &InmemSignal{ - signal: sig, - inm: inmem, - w: w, - sigCh: make(chan os.Signal, 1), - stopCh: make(chan struct{}), - } - signal.Notify(i.sigCh, sig) - go i.run() - return i -} - -// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1 -// and writes output to stderr. Windows uses SIGBREAK -func DefaultInmemSignal(inmem *InmemSink) *InmemSignal { - return NewInmemSignal(inmem, DefaultSignal, os.Stderr) -} - -// Stop is used to stop the InmemSignal from listening -func (i *InmemSignal) Stop() { - i.stopLock.Lock() - defer i.stopLock.Unlock() - - if i.stop { - return - } - i.stop = true - close(i.stopCh) - signal.Stop(i.sigCh) -} - -// run is a long running routine that handles signals -func (i *InmemSignal) run() { - for { - select { - case <-i.sigCh: - i.dumpStats() - case <-i.stopCh: - return - } - } -} - -// dumpStats is used to dump the data to output writer -func (i *InmemSignal) dumpStats() { - buf := bytes.NewBuffer(nil) - - data := i.inm.Data() - // Skip the last period which is still being aggregated - for i := 0; i < len(data)-1; i++ { - intv := data[i] - intv.RLock() - for name, val := range intv.Gauges { - fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val) - } - for name, vals := range intv.Points { - for _, val := range vals { - fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val) - } - } - for name, agg := range intv.Counters { - fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg) - } - for name, agg := range intv.Samples { - fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg) - } - intv.RUnlock() - } - - // Write out the bytes - i.w.Write(buf.Bytes()) -} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal_test.go b/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal_test.go deleted file mode 100644 index 9bbca5f25..000000000 --- a/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_signal_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package metrics - -import ( - "bytes" - "os" - "strings" - "syscall" - "testing" - "time" -) - -func TestInmemSignal(t *testing.T) { - buf := bytes.NewBuffer(nil) - inm := NewInmemSink(10*time.Millisecond, 50*time.Millisecond) - sig := NewInmemSignal(inm, syscall.SIGUSR1, buf) - defer sig.Stop() - - inm.SetGauge([]string{"foo"}, 42) - inm.EmitKey([]string{"bar"}, 42) - inm.IncrCounter([]string{"baz"}, 42) - inm.AddSample([]string{"wow"}, 42) - - // Wait for period to end - time.Sleep(15 * time.Millisecond) - - // Send signal! 
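// Sending SIGUSR1 to our own PID exercises the full path: signal.Notify
// (registered in NewInmemSignal) delivers it on sigCh, run() wakes and calls
// dumpStats, which formats the retained intervals into buf.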
- syscall.Kill(os.Getpid(), syscall.SIGUSR1) - - // Wait for flush - time.Sleep(10 * time.Millisecond) - - // Check the output - out := string(buf.Bytes()) - if !strings.Contains(out, "[G] 'foo': 42") { - t.Fatalf("bad: %v", out) - } - if !strings.Contains(out, "[P] 'bar': 42") { - t.Fatalf("bad: %v", out) - } - if !strings.Contains(out, "[C] 'baz': Count: 1 Sum: 42") { - t.Fatalf("bad: %v", out) - } - if !strings.Contains(out, "[S] 'wow': Count: 1 Sum: 42") { - t.Fatalf("bad: %v", out) - } -} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_test.go b/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_test.go deleted file mode 100644 index 228a2fc1a..000000000 --- a/Godeps/_workspace/src/github.com/armon/go-metrics/inmem_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package metrics - -import ( - "math" - "testing" - "time" -) - -func TestInmemSink(t *testing.T) { - inm := NewInmemSink(10*time.Millisecond, 50*time.Millisecond) - - data := inm.Data() - if len(data) != 1 { - t.Fatalf("bad: %v", data) - } - - // Add data points - inm.SetGauge([]string{"foo", "bar"}, 42) - inm.EmitKey([]string{"foo", "bar"}, 42) - inm.IncrCounter([]string{"foo", "bar"}, 20) - inm.IncrCounter([]string{"foo", "bar"}, 22) - inm.AddSample([]string{"foo", "bar"}, 20) - inm.AddSample([]string{"foo", "bar"}, 22) - - data = inm.Data() - if len(data) != 1 { - t.Fatalf("bad: %v", data) - } - - intvM := data[0] - intvM.RLock() - - if time.Now().Sub(intvM.Interval) > 10*time.Millisecond { - t.Fatalf("interval too old") - } - if intvM.Gauges["foo.bar"] != 42 { - t.Fatalf("bad val: %v", intvM.Gauges) - } - if intvM.Points["foo.bar"][0] != 42 { - t.Fatalf("bad val: %v", intvM.Points) - } - - agg := intvM.Counters["foo.bar"] - if agg.Count != 2 { - t.Fatalf("bad val: %v", agg) - } - if agg.Sum != 42 { - t.Fatalf("bad val: %v", agg) - } - if agg.SumSq != 884 { - t.Fatalf("bad val: %v", agg) - } - if agg.Min != 20 { - t.Fatalf("bad val: %v", agg) - } - if agg.Max != 22 { - t.Fatalf("bad val: %v", agg) - } - if agg.Mean() != 21 { - t.Fatalf("bad val: %v", agg) - } - if agg.Stddev() != math.Sqrt(2) { - t.Fatalf("bad val: %v", agg) - } - - if agg.LastUpdated.IsZero() { - t.Fatalf("agg.LastUpdated is not set: %v", agg) - } - - diff := time.Now().Sub(agg.LastUpdated).Seconds() - if diff > 1 { - t.Fatalf("time diff too great: %f", diff) - } - - if agg = intvM.Samples["foo.bar"]; agg == nil { - t.Fatalf("missing sample") - } - - intvM.RUnlock() - - for i := 1; i < 10; i++ { - time.Sleep(10 * time.Millisecond) - inm.SetGauge([]string{"foo", "bar"}, 42) - data = inm.Data() - if len(data) != min(i+1, 5) { - t.Fatalf("bad: %v", data) - } - } - - // Should not exceed 5 intervals! 
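// With NewInmemSink(10ms, 50ms) above, maxIntervals = retain/interval = 5,
// and createInterval truncates older entries, so Data() never returns more
// than 5 intervals.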
- time.Sleep(10 * time.Millisecond) - inm.SetGauge([]string{"foo", "bar"}, 42) - data = inm.Data() - if len(data) != 5 { - t.Fatalf("bad: %v", data) - } -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/metrics.go b/Godeps/_workspace/src/github.com/armon/go-metrics/metrics.go deleted file mode 100644 index b818e4182..000000000 --- a/Godeps/_workspace/src/github.com/armon/go-metrics/metrics.go +++ /dev/null @@ -1,115 +0,0 @@ -package metrics - -import ( - "runtime" - "time" -) - -func (m *Metrics) SetGauge(key []string, val float32) { - if m.HostName != "" && m.EnableHostname { - key = insert(0, m.HostName, key) - } - if m.EnableTypePrefix { - key = insert(0, "gauge", key) - } - if m.ServiceName != "" { - key = insert(0, m.ServiceName, key) - } - m.sink.SetGauge(key, val) -} - -func (m *Metrics) EmitKey(key []string, val float32) { - if m.EnableTypePrefix { - key = insert(0, "kv", key) - } - if m.ServiceName != "" { - key = insert(0, m.ServiceName, key) - } - m.sink.EmitKey(key, val) -} - -func (m *Metrics) IncrCounter(key []string, val float32) { - if m.EnableTypePrefix { - key = insert(0, "counter", key) - } - if m.ServiceName != "" { - key = insert(0, m.ServiceName, key) - } - m.sink.IncrCounter(key, val) -} - -func (m *Metrics) AddSample(key []string, val float32) { - if m.EnableTypePrefix { - key = insert(0, "sample", key) - } - if m.ServiceName != "" { - key = insert(0, m.ServiceName, key) - } - m.sink.AddSample(key, val) -} - -func (m *Metrics) MeasureSince(key []string, start time.Time) { - if m.EnableTypePrefix { - key = insert(0, "timer", key) - } - if m.ServiceName != "" { - key = insert(0, m.ServiceName, key) - } - now := time.Now() - elapsed := now.Sub(start) - msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity) - m.sink.AddSample(key, msec) -} - -// Periodically collects runtime stats to publish -func (m *Metrics) collectStats() { - for { - time.Sleep(m.ProfileInterval) - m.emitRuntimeStats() - } -} - -// Emits various runtime statistics -func (m *Metrics) emitRuntimeStats() { - // Export number of Goroutines - numRoutines := runtime.NumGoroutine() - m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines)) - - // Export memory stats - var stats runtime.MemStats - runtime.ReadMemStats(&stats) - m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc)) - m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys)) - m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs)) - m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees)) - m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects)) - m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs)) - m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC)) - - // Export info about the last few GC runs - num := stats.NumGC - - // Handle wrap around - if num < m.lastNumGC { - m.lastNumGC = 0 - } - - // Ensure we don't scan more than 256 - if num-m.lastNumGC >= 256 { - m.lastNumGC = num - 255 - } - - for i := m.lastNumGC; i < num; i++ { - pause := stats.PauseNs[i%256] - m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause)) - } - m.lastNumGC = num -} - -// Inserts a string value at an index into the slice -func insert(i int, v string, s []string) []string { - s = append(s, "") - copy(s[i+1:], s[i:]) - s[i] = v - return s -} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/metrics_test.go 
b/Godeps/_workspace/src/github.com/armon/go-metrics/metrics_test.go deleted file mode 100644 index c7baf22bf..000000000 --- a/Godeps/_workspace/src/github.com/armon/go-metrics/metrics_test.go +++ /dev/null @@ -1,262 +0,0 @@ -package metrics - -import ( - "reflect" - "runtime" - "testing" - "time" -) - -func mockMetric() (*MockSink, *Metrics) { - m := &MockSink{} - met := &Metrics{sink: m} - return m, met -} - -func TestMetrics_SetGauge(t *testing.T) { - m, met := mockMetric() - met.SetGauge([]string{"key"}, float32(1)) - if m.keys[0][0] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } - - m, met = mockMetric() - met.HostName = "test" - met.EnableHostname = true - met.SetGauge([]string{"key"}, float32(1)) - if m.keys[0][0] != "test" || m.keys[0][1] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } - - m, met = mockMetric() - met.EnableTypePrefix = true - met.SetGauge([]string{"key"}, float32(1)) - if m.keys[0][0] != "gauge" || m.keys[0][1] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } - - m, met = mockMetric() - met.ServiceName = "service" - met.SetGauge([]string{"key"}, float32(1)) - if m.keys[0][0] != "service" || m.keys[0][1] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } -} - -func TestMetrics_EmitKey(t *testing.T) { - m, met := mockMetric() - met.EmitKey([]string{"key"}, float32(1)) - if m.keys[0][0] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } - - m, met = mockMetric() - met.EnableTypePrefix = true - met.EmitKey([]string{"key"}, float32(1)) - if m.keys[0][0] != "kv" || m.keys[0][1] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } - - m, met = mockMetric() - met.ServiceName = "service" - met.EmitKey([]string{"key"}, float32(1)) - if m.keys[0][0] != "service" || m.keys[0][1] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } -} - -func TestMetrics_IncrCounter(t *testing.T) { - m, met := mockMetric() - met.IncrCounter([]string{"key"}, float32(1)) - if m.keys[0][0] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } - - m, met = mockMetric() - met.EnableTypePrefix = true - met.IncrCounter([]string{"key"}, float32(1)) - if m.keys[0][0] != "counter" || m.keys[0][1] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } - - m, met = mockMetric() - met.ServiceName = "service" - met.IncrCounter([]string{"key"}, float32(1)) - if m.keys[0][0] != "service" || m.keys[0][1] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } -} - -func TestMetrics_AddSample(t *testing.T) { - m, met := mockMetric() - met.AddSample([]string{"key"}, float32(1)) - if m.keys[0][0] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } - - m, met = mockMetric() - met.EnableTypePrefix = true - met.AddSample([]string{"key"}, float32(1)) - if m.keys[0][0] != "sample" || m.keys[0][1] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } - - m, met = mockMetric() - met.ServiceName = "service" - met.AddSample([]string{"key"}, float32(1)) - if m.keys[0][0] != "service" || m.keys[0][1] != "key" { - t.Fatalf("") - } - if m.vals[0] != 1 { - t.Fatalf("") - } -} - -func TestMetrics_MeasureSince(t *testing.T) { - m, met := mockMetric() - met.TimerGranularity = time.Millisecond - n := time.Now() - met.MeasureSince([]string{"key"}, n) - if m.keys[0][0] != "key" { - t.Fatalf("") - } - if m.vals[0] > 0.1 { - t.Fatalf("") - } - - m, met = mockMetric() - met.TimerGranularity = time.Millisecond - 
met.EnableTypePrefix = true - met.MeasureSince([]string{"key"}, n) - if m.keys[0][0] != "timer" || m.keys[0][1] != "key" { - t.Fatalf("") - } - if m.vals[0] > 0.1 { - t.Fatalf("") - } - - m, met = mockMetric() - met.TimerGranularity = time.Millisecond - met.ServiceName = "service" - met.MeasureSince([]string{"key"}, n) - if m.keys[0][0] != "service" || m.keys[0][1] != "key" { - t.Fatalf("") - } - if m.vals[0] > 0.1 { - t.Fatalf("") - } -} - -func TestMetrics_EmitRuntimeStats(t *testing.T) { - runtime.GC() - m, met := mockMetric() - met.emitRuntimeStats() - - if m.keys[0][0] != "runtime" || m.keys[0][1] != "num_goroutines" { - t.Fatalf("bad key %v", m.keys) - } - if m.vals[0] <= 1 { - t.Fatalf("bad val: %v", m.vals) - } - - if m.keys[1][0] != "runtime" || m.keys[1][1] != "alloc_bytes" { - t.Fatalf("bad key %v", m.keys) - } - if m.vals[1] <= 40000 { - t.Fatalf("bad val: %v", m.vals) - } - - if m.keys[2][0] != "runtime" || m.keys[2][1] != "sys_bytes" { - t.Fatalf("bad key %v", m.keys) - } - if m.vals[2] <= 100000 { - t.Fatalf("bad val: %v", m.vals) - } - - if m.keys[3][0] != "runtime" || m.keys[3][1] != "malloc_count" { - t.Fatalf("bad key %v", m.keys) - } - if m.vals[3] <= 100 { - t.Fatalf("bad val: %v", m.vals) - } - - if m.keys[4][0] != "runtime" || m.keys[4][1] != "free_count" { - t.Fatalf("bad key %v", m.keys) - } - if m.vals[4] <= 100 { - t.Fatalf("bad val: %v", m.vals) - } - - if m.keys[5][0] != "runtime" || m.keys[5][1] != "heap_objects" { - t.Fatalf("bad key %v", m.keys) - } - if m.vals[5] <= 100 { - t.Fatalf("bad val: %v", m.vals) - } - - if m.keys[6][0] != "runtime" || m.keys[6][1] != "total_gc_pause_ns" { - t.Fatalf("bad key %v", m.keys) - } - if m.vals[6] <= 100000 { - t.Fatalf("bad val: %v", m.vals) - } - - if m.keys[7][0] != "runtime" || m.keys[7][1] != "total_gc_runs" { - t.Fatalf("bad key %v", m.keys) - } - if m.vals[7] <= 1 { - t.Fatalf("bad val: %v", m.vals) - } - - if m.keys[8][0] != "runtime" || m.keys[8][1] != "gc_pause_ns" { - t.Fatalf("bad key %v", m.keys) - } - if m.vals[8] <= 1000 { - t.Fatalf("bad val: %v", m.vals) - } -} - -func TestInsert(t *testing.T) { - k := []string{"hi", "bob"} - exp := []string{"hi", "there", "bob"} - out := insert(1, "there", k) - if !reflect.DeepEqual(exp, out) { - t.Fatalf("bad insert %v %v", exp, out) - } -} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/prometheus/prometheus.go b/Godeps/_workspace/src/github.com/armon/go-metrics/prometheus/prometheus.go deleted file mode 100644 index 362dbfb62..000000000 --- a/Godeps/_workspace/src/github.com/armon/go-metrics/prometheus/prometheus.go +++ /dev/null @@ -1,88 +0,0 @@ -// +build go1.3 -package prometheus - -import ( - "strings" - "sync" - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -type PrometheusSink struct { - mu sync.Mutex - gauges map[string]prometheus.Gauge - summaries map[string]prometheus.Summary - counters map[string]prometheus.Counter -} - -func NewPrometheusSink() (*PrometheusSink, error) { - return &PrometheusSink{ - gauges: make(map[string]prometheus.Gauge), - summaries: make(map[string]prometheus.Summary), - counters: make(map[string]prometheus.Counter), - }, nil -} - -func (p *PrometheusSink) flattenKey(parts []string) string { - joined := strings.Join(parts, "_") - joined = strings.Replace(joined, " ", "_", -1) - joined = strings.Replace(joined, ".", "_", -1) - joined = strings.Replace(joined, "-", "_", -1) - return joined -} - -func (p *PrometheusSink) SetGauge(parts []string, val float32) { - p.mu.Lock() - defer p.mu.Unlock() - key := 
p.flattenKey(parts) - g, ok := p.gauges[key] - if !ok { - g = prometheus.NewGauge(prometheus.GaugeOpts{ - Name: key, - Help: key, - }) - prometheus.MustRegister(g) - p.gauges[key] = g - } - g.Set(float64(val)) -} - -func (p *PrometheusSink) AddSample(parts []string, val float32) { - p.mu.Lock() - defer p.mu.Unlock() - key := p.flattenKey(parts) - g, ok := p.summaries[key] - if !ok { - g = prometheus.NewSummary(prometheus.SummaryOpts{ - Name: key, - Help: key, - MaxAge: 10 * time.Second, - }) - prometheus.MustRegister(g) - p.summaries[key] = g - } - g.Observe(float64(val)) -} - -// EmitKey is not implemented. Prometheus doesn’t offer a type for which an -// arbitrary number of values is retained, as Prometheus works with a pull -// model, rather than a push model. -func (p *PrometheusSink) EmitKey(key []string, val float32) { -} - -func (p *PrometheusSink) IncrCounter(parts []string, val float32) { - p.mu.Lock() - defer p.mu.Unlock() - key := p.flattenKey(parts) - g, ok := p.counters[key] - if !ok { - g = prometheus.NewCounter(prometheus.CounterOpts{ - Name: key, - Help: key, - }) - prometheus.MustRegister(g) - p.counters[key] = g - } - g.Add(float64(val)) -} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/sink.go b/Godeps/_workspace/src/github.com/armon/go-metrics/sink.go deleted file mode 100644 index 0c240c2c4..000000000 --- a/Godeps/_workspace/src/github.com/armon/go-metrics/sink.go +++ /dev/null @@ -1,52 +0,0 @@ -package metrics - -// The MetricSink interface is used to transmit metrics information -// to an external system -type MetricSink interface { - // A Gauge should retain the last value it is set to - SetGauge(key []string, val float32) - - // Should emit a Key/Value pair for each call - EmitKey(key []string, val float32) - - // Counters should accumulate values - IncrCounter(key []string, val float32) - - // Samples are for timing information, where quantiles are used - AddSample(key []string, val float32) -} - -// BlackholeSink is used to just blackhole messages -type BlackholeSink struct{} - -func (*BlackholeSink) SetGauge(key []string, val float32) {} -func (*BlackholeSink) EmitKey(key []string, val float32) {} -func (*BlackholeSink) IncrCounter(key []string, val float32) {} -func (*BlackholeSink) AddSample(key []string, val float32) {} - -// FanoutSink is used to sink to fanout values to multiple sinks -type FanoutSink []MetricSink - -func (fh FanoutSink) SetGauge(key []string, val float32) { - for _, s := range fh { - s.SetGauge(key, val) - } -} - -func (fh FanoutSink) EmitKey(key []string, val float32) { - for _, s := range fh { - s.EmitKey(key, val) - } -} - -func (fh FanoutSink) IncrCounter(key []string, val float32) { - for _, s := range fh { - s.IncrCounter(key, val) - } -} - -func (fh FanoutSink) AddSample(key []string, val float32) { - for _, s := range fh { - s.AddSample(key, val) - } -} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/sink_test.go b/Godeps/_workspace/src/github.com/armon/go-metrics/sink_test.go deleted file mode 100644 index 15c5d771a..000000000 --- a/Godeps/_workspace/src/github.com/armon/go-metrics/sink_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package metrics - -import ( - "reflect" - "testing" -) - -type MockSink struct { - keys [][]string - vals []float32 -} - -func (m *MockSink) SetGauge(key []string, val float32) { - m.keys = append(m.keys, key) - m.vals = append(m.vals, val) -} -func (m *MockSink) EmitKey(key []string, val float32) { - m.keys = append(m.keys, key) - m.vals = append(m.vals, val) -} -func (m 
*MockSink) IncrCounter(key []string, val float32) { - m.keys = append(m.keys, key) - m.vals = append(m.vals, val) -} -func (m *MockSink) AddSample(key []string, val float32) { - m.keys = append(m.keys, key) - m.vals = append(m.vals, val) -} - -func TestFanoutSink_Gauge(t *testing.T) { - m1 := &MockSink{} - m2 := &MockSink{} - fh := &FanoutSink{m1, m2} - - k := []string{"test"} - v := float32(42.0) - fh.SetGauge(k, v) - - if !reflect.DeepEqual(m1.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m2.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m1.vals[0], v) { - t.Fatalf("val not equal") - } - if !reflect.DeepEqual(m2.vals[0], v) { - t.Fatalf("val not equal") - } -} - -func TestFanoutSink_Key(t *testing.T) { - m1 := &MockSink{} - m2 := &MockSink{} - fh := &FanoutSink{m1, m2} - - k := []string{"test"} - v := float32(42.0) - fh.EmitKey(k, v) - - if !reflect.DeepEqual(m1.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m2.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m1.vals[0], v) { - t.Fatalf("val not equal") - } - if !reflect.DeepEqual(m2.vals[0], v) { - t.Fatalf("val not equal") - } -} - -func TestFanoutSink_Counter(t *testing.T) { - m1 := &MockSink{} - m2 := &MockSink{} - fh := &FanoutSink{m1, m2} - - k := []string{"test"} - v := float32(42.0) - fh.IncrCounter(k, v) - - if !reflect.DeepEqual(m1.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m2.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m1.vals[0], v) { - t.Fatalf("val not equal") - } - if !reflect.DeepEqual(m2.vals[0], v) { - t.Fatalf("val not equal") - } -} - -func TestFanoutSink_Sample(t *testing.T) { - m1 := &MockSink{} - m2 := &MockSink{} - fh := &FanoutSink{m1, m2} - - k := []string{"test"} - v := float32(42.0) - fh.AddSample(k, v) - - if !reflect.DeepEqual(m1.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m2.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m1.vals[0], v) { - t.Fatalf("val not equal") - } - if !reflect.DeepEqual(m2.vals[0], v) { - t.Fatalf("val not equal") - } -} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/start.go b/Godeps/_workspace/src/github.com/armon/go-metrics/start.go deleted file mode 100644 index 44113f100..000000000 --- a/Godeps/_workspace/src/github.com/armon/go-metrics/start.go +++ /dev/null @@ -1,95 +0,0 @@ -package metrics - -import ( - "os" - "time" -) - -// Config is used to configure metrics settings -type Config struct { - ServiceName string // Prefixed with keys to separate services - HostName string // Hostname to use. If not provided and EnableHostname is set, os.Hostname is used - EnableHostname bool // Enable prefixing gauge values with hostname - EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory) - EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer") - TimerGranularity time.Duration // Granularity of timers. 
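// MeasureSince divides elapsed nanoseconds by this granularity, so the
// default time.Millisecond reports timer samples in milliseconds.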
- ProfileInterval time.Duration // Interval to profile runtime metrics -} - -// Metrics represents an instance of a metrics sink that can -// be used to emit -type Metrics struct { - Config - lastNumGC uint32 - sink MetricSink -} - -// Shared global metrics instance -var globalMetrics *Metrics - -func init() { - // Initialize to a blackhole sink to avoid errors - globalMetrics = &Metrics{sink: &BlackholeSink{}} -} - -// DefaultConfig provides a sane default configuration -func DefaultConfig(serviceName string) *Config { - c := &Config{ - ServiceName: serviceName, // Use client provided service - HostName: "", - EnableHostname: true, // Enable hostname prefix - EnableRuntimeMetrics: true, // Enable runtime profiling - EnableTypePrefix: false, // Disable type prefix - TimerGranularity: time.Millisecond, // Timers are in milliseconds - ProfileInterval: time.Second, // Poll runtime every second - } - - // Try to get the hostname - name, _ := os.Hostname() - c.HostName = name - return c -} - -// New is used to create a new instance of Metrics -func New(conf *Config, sink MetricSink) (*Metrics, error) { - met := &Metrics{} - met.Config = *conf - met.sink = sink - - // Start the runtime collector - if conf.EnableRuntimeMetrics { - go met.collectStats() - } - return met, nil -} - -// NewGlobal is the same as New, but it assigns the metrics object to be -// used globally as well as returning it. -func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) { - metrics, err := New(conf, sink) - if err == nil { - globalMetrics = metrics - } - return metrics, err -} - -// Proxy all the methods to the globalMetrics instance -func SetGauge(key []string, val float32) { - globalMetrics.SetGauge(key, val) -} - -func EmitKey(key []string, val float32) { - globalMetrics.EmitKey(key, val) -} - -func IncrCounter(key []string, val float32) { - globalMetrics.IncrCounter(key, val) -} - -func AddSample(key []string, val float32) { - globalMetrics.AddSample(key, val) -} - -func MeasureSince(key []string, start time.Time) { - globalMetrics.MeasureSince(key, start) -} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/start_test.go b/Godeps/_workspace/src/github.com/armon/go-metrics/start_test.go deleted file mode 100644 index 8b3210c15..000000000 --- a/Godeps/_workspace/src/github.com/armon/go-metrics/start_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package metrics - -import ( - "reflect" - "testing" - "time" -) - -func TestDefaultConfig(t *testing.T) { - conf := DefaultConfig("service") - if conf.ServiceName != "service" { - t.Fatalf("Bad name") - } - if conf.HostName == "" { - t.Fatalf("missing hostname") - } - if !conf.EnableHostname || !conf.EnableRuntimeMetrics { - t.Fatalf("expect true") - } - if conf.EnableTypePrefix { - t.Fatalf("expect false") - } - if conf.TimerGranularity != time.Millisecond { - t.Fatalf("bad granularity") - } - if conf.ProfileInterval != time.Second { - t.Fatalf("bad interval") - } -} - -func Test_GlobalMetrics_SetGauge(t *testing.T) { - m := &MockSink{} - globalMetrics = &Metrics{sink: m} - - k := []string{"test"} - v := float32(42.0) - SetGauge(k, v) - - if !reflect.DeepEqual(m.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m.vals[0], v) { - t.Fatalf("val not equal") - } -} - -func Test_GlobalMetrics_EmitKey(t *testing.T) { - m := &MockSink{} - globalMetrics = &Metrics{sink: m} - - k := []string{"test"} - v := float32(42.0) - EmitKey(k, v) - - if !reflect.DeepEqual(m.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m.vals[0], v) 
{ - t.Fatalf("val not equal") - } -} - -func Test_GlobalMetrics_IncrCounter(t *testing.T) { - m := &MockSink{} - globalMetrics = &Metrics{sink: m} - - k := []string{"test"} - v := float32(42.0) - IncrCounter(k, v) - - if !reflect.DeepEqual(m.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m.vals[0], v) { - t.Fatalf("val not equal") - } -} - -func Test_GlobalMetrics_AddSample(t *testing.T) { - m := &MockSink{} - globalMetrics = &Metrics{sink: m} - - k := []string{"test"} - v := float32(42.0) - AddSample(k, v) - - if !reflect.DeepEqual(m.keys[0], k) { - t.Fatalf("key not equal") - } - if !reflect.DeepEqual(m.vals[0], v) { - t.Fatalf("val not equal") - } -} - -func Test_GlobalMetrics_MeasureSince(t *testing.T) { - m := &MockSink{} - globalMetrics = &Metrics{sink: m} - globalMetrics.TimerGranularity = time.Millisecond - - k := []string{"test"} - now := time.Now() - MeasureSince(k, now) - - if !reflect.DeepEqual(m.keys[0], k) { - t.Fatalf("key not equal") - } - if m.vals[0] > 0.1 { - t.Fatalf("val too large %v", m.vals[0]) - } -} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/statsd.go b/Godeps/_workspace/src/github.com/armon/go-metrics/statsd.go deleted file mode 100644 index 65a5021a0..000000000 --- a/Godeps/_workspace/src/github.com/armon/go-metrics/statsd.go +++ /dev/null @@ -1,154 +0,0 @@ -package metrics - -import ( - "bytes" - "fmt" - "log" - "net" - "strings" - "time" -) - -const ( - // statsdMaxLen is the maximum size of a packet - // to send to statsd - statsdMaxLen = 1400 -) - -// StatsdSink provides a MetricSink that can be used -// with a statsite or statsd metrics server. It uses -// only UDP packets, while StatsiteSink uses TCP. -type StatsdSink struct { - addr string - metricQueue chan string -} - -// NewStatsdSink is used to create a new StatsdSink -func NewStatsdSink(addr string) (*StatsdSink, error) { - s := &StatsdSink{ - addr: addr, - metricQueue: make(chan string, 4096), - } - go s.flushMetrics() - return s, nil -} - -// Close is used to stop flushing to statsd -func (s *StatsdSink) Shutdown() { - close(s.metricQueue) -} - -func (s *StatsdSink) SetGauge(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) -} - -func (s *StatsdSink) EmitKey(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) -} - -func (s *StatsdSink) IncrCounter(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) -} - -func (s *StatsdSink) AddSample(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) -} - -// Flattens the key for formatting, removes spaces -func (s *StatsdSink) flattenKey(parts []string) string { - joined := strings.Join(parts, ".") - return strings.Map(func(r rune) rune { - switch r { - case ':': - fallthrough - case ' ': - return '_' - default: - return r - } - }, joined) -} - -// Does a non-blocking push to the metrics queue -func (s *StatsdSink) pushMetric(m string) { - select { - case s.metricQueue <- m: - default: - } -} - -// Flushes metrics -func (s *StatsdSink) flushMetrics() { - var sock net.Conn - var err error - var wait <-chan time.Time - ticker := time.NewTicker(flushInterval) - defer ticker.Stop() - -CONNECT: - // Create a buffer - buf := bytes.NewBuffer(nil) - - // Attempt to connect - sock, err = net.Dial("udp", s.addr) - if err != nil { - log.Printf("[ERR] Error connecting to statsd! 
Err: %s", err) - goto WAIT - } - - for { - select { - case metric, ok := <-s.metricQueue: - // Get a metric from the queue - if !ok { - goto QUIT - } - - // Check if this would overflow the packet size - if len(metric)+buf.Len() > statsdMaxLen { - _, err := sock.Write(buf.Bytes()) - buf.Reset() - if err != nil { - log.Printf("[ERR] Error writing to statsd! Err: %s", err) - goto WAIT - } - } - - // Append to the buffer - buf.WriteString(metric) - - case <-ticker.C: - if buf.Len() == 0 { - continue - } - - _, err := sock.Write(buf.Bytes()) - buf.Reset() - if err != nil { - log.Printf("[ERR] Error flushing to statsd! Err: %s", err) - goto WAIT - } - } - } - -WAIT: - // Wait for a while - wait = time.After(time.Duration(5) * time.Second) - for { - select { - // Dequeue the messages to avoid backlog - case _, ok := <-s.metricQueue: - if !ok { - goto QUIT - } - case <-wait: - goto CONNECT - } - } -QUIT: - s.metricQueue = nil -} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/statsd_test.go b/Godeps/_workspace/src/github.com/armon/go-metrics/statsd_test.go deleted file mode 100644 index 622eb5d3a..000000000 --- a/Godeps/_workspace/src/github.com/armon/go-metrics/statsd_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package metrics - -import ( - "bufio" - "bytes" - "net" - "testing" - "time" -) - -func TestStatsd_Flatten(t *testing.T) { - s := &StatsdSink{} - flat := s.flattenKey([]string{"a", "b", "c", "d"}) - if flat != "a.b.c.d" { - t.Fatalf("Bad flat") - } -} - -func TestStatsd_PushFullQueue(t *testing.T) { - q := make(chan string, 1) - q <- "full" - - s := &StatsdSink{metricQueue: q} - s.pushMetric("omit") - - out := <-q - if out != "full" { - t.Fatalf("bad val %v", out) - } - - select { - case v := <-q: - t.Fatalf("bad val %v", v) - default: - } -} - -func TestStatsd_Conn(t *testing.T) { - addr := "127.0.0.1:7524" - done := make(chan bool) - go func() { - list, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 7524}) - if err != nil { - panic(err) - } - defer list.Close() - buf := make([]byte, 1500) - n, err := list.Read(buf) - if err != nil { - panic(err) - } - buf = buf[:n] - reader := bufio.NewReader(bytes.NewReader(buf)) - - line, err := reader.ReadString('\n') - if err != nil { - t.Fatalf("unexpected err %s", err) - } - if line != "gauge.val:1.000000|g\n" { - t.Fatalf("bad line %s", line) - } - - line, err = reader.ReadString('\n') - if err != nil { - t.Fatalf("unexpected err %s", err) - } - if line != "key.other:2.000000|kv\n" { - t.Fatalf("bad line %s", line) - } - - line, err = reader.ReadString('\n') - if err != nil { - t.Fatalf("unexpected err %s", err) - } - if line != "counter.me:3.000000|c\n" { - t.Fatalf("bad line %s", line) - } - - line, err = reader.ReadString('\n') - if err != nil { - t.Fatalf("unexpected err %s", err) - } - if line != "sample.slow_thingy:4.000000|ms\n" { - t.Fatalf("bad line %s", line) - } - - done <- true - }() - s, err := NewStatsdSink(addr) - if err != nil { - t.Fatalf("bad error") - } - - s.SetGauge([]string{"gauge", "val"}, float32(1)) - s.EmitKey([]string{"key", "other"}, float32(2)) - s.IncrCounter([]string{"counter", "me"}, float32(3)) - s.AddSample([]string{"sample", "slow thingy"}, float32(4)) - - select { - case <-done: - s.Shutdown() - case <-time.After(3 * time.Second): - t.Fatalf("timeout") - } -} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/statsite.go b/Godeps/_workspace/src/github.com/armon/go-metrics/statsite.go deleted file mode 100644 index 68730139a..000000000 --- 
a/Godeps/_workspace/src/github.com/armon/go-metrics/statsite.go +++ /dev/null @@ -1,142 +0,0 @@ -package metrics - -import ( - "bufio" - "fmt" - "log" - "net" - "strings" - "time" -) - -const ( - // We force flush the statsite metrics after this period of - // inactivity. Prevents stats from getting stuck in a buffer - // forever. - flushInterval = 100 * time.Millisecond -) - -// StatsiteSink provides a MetricSink that can be used with a -// statsite metrics server -type StatsiteSink struct { - addr string - metricQueue chan string -} - -// NewStatsiteSink is used to create a new StatsiteSink -func NewStatsiteSink(addr string) (*StatsiteSink, error) { - s := &StatsiteSink{ - addr: addr, - metricQueue: make(chan string, 4096), - } - go s.flushMetrics() - return s, nil -} - -// Close is used to stop flushing to statsite -func (s *StatsiteSink) Shutdown() { - close(s.metricQueue) -} - -func (s *StatsiteSink) SetGauge(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) -} - -func (s *StatsiteSink) EmitKey(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) -} - -func (s *StatsiteSink) IncrCounter(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) -} - -func (s *StatsiteSink) AddSample(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) -} - -// Flattens the key for formatting, removes spaces -func (s *StatsiteSink) flattenKey(parts []string) string { - joined := strings.Join(parts, ".") - return strings.Map(func(r rune) rune { - switch r { - case ':': - fallthrough - case ' ': - return '_' - default: - return r - } - }, joined) -} - -// Does a non-blocking push to the metrics queue -func (s *StatsiteSink) pushMetric(m string) { - select { - case s.metricQueue <- m: - default: - } -} - -// Flushes metrics -func (s *StatsiteSink) flushMetrics() { - var sock net.Conn - var err error - var wait <-chan time.Time - var buffered *bufio.Writer - ticker := time.NewTicker(flushInterval) - defer ticker.Stop() - -CONNECT: - // Attempt to connect - sock, err = net.Dial("tcp", s.addr) - if err != nil { - log.Printf("[ERR] Error connecting to statsite! Err: %s", err) - goto WAIT - } - - // Create a buffered writer - buffered = bufio.NewWriter(sock) - - for { - select { - case metric, ok := <-s.metricQueue: - // Get a metric from the queue - if !ok { - goto QUIT - } - - // Try to send to statsite - _, err := buffered.Write([]byte(metric)) - if err != nil { - log.Printf("[ERR] Error writing to statsite! Err: %s", err) - goto WAIT - } - case <-ticker.C: - if err := buffered.Flush(); err != nil { - log.Printf("[ERR] Error flushing to statsite! 
Err: %s", err) - goto WAIT - } - } - } - -WAIT: - // Wait for a while - wait = time.After(time.Duration(5) * time.Second) - for { - select { - // Dequeue the messages to avoid backlog - case _, ok := <-s.metricQueue: - if !ok { - goto QUIT - } - case <-wait: - goto CONNECT - } - } -QUIT: - s.metricQueue = nil -} diff --git a/Godeps/_workspace/src/github.com/armon/go-metrics/statsite_test.go b/Godeps/_workspace/src/github.com/armon/go-metrics/statsite_test.go deleted file mode 100644 index d9c744f41..000000000 --- a/Godeps/_workspace/src/github.com/armon/go-metrics/statsite_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package metrics - -import ( - "bufio" - "net" - "testing" - "time" -) - -func acceptConn(addr string) net.Conn { - ln, _ := net.Listen("tcp", addr) - conn, _ := ln.Accept() - return conn -} - -func TestStatsite_Flatten(t *testing.T) { - s := &StatsiteSink{} - flat := s.flattenKey([]string{"a", "b", "c", "d"}) - if flat != "a.b.c.d" { - t.Fatalf("Bad flat") - } -} - -func TestStatsite_PushFullQueue(t *testing.T) { - q := make(chan string, 1) - q <- "full" - - s := &StatsiteSink{metricQueue: q} - s.pushMetric("omit") - - out := <-q - if out != "full" { - t.Fatalf("bad val %v", out) - } - - select { - case v := <-q: - t.Fatalf("bad val %v", v) - default: - } -} - -func TestStatsite_Conn(t *testing.T) { - addr := "localhost:7523" - done := make(chan bool) - go func() { - conn := acceptConn(addr) - reader := bufio.NewReader(conn) - - line, err := reader.ReadString('\n') - if err != nil { - t.Fatalf("unexpected err %s", err) - } - if line != "gauge.val:1.000000|g\n" { - t.Fatalf("bad line %s", line) - } - - line, err = reader.ReadString('\n') - if err != nil { - t.Fatalf("unexpected err %s", err) - } - if line != "key.other:2.000000|kv\n" { - t.Fatalf("bad line %s", line) - } - - line, err = reader.ReadString('\n') - if err != nil { - t.Fatalf("unexpected err %s", err) - } - if line != "counter.me:3.000000|c\n" { - t.Fatalf("bad line %s", line) - } - - line, err = reader.ReadString('\n') - if err != nil { - t.Fatalf("unexpected err %s", err) - } - if line != "sample.slow_thingy:4.000000|ms\n" { - t.Fatalf("bad line %s", line) - } - - conn.Close() - done <- true - }() - s, err := NewStatsiteSink(addr) - if err != nil { - t.Fatalf("bad error") - } - - s.SetGauge([]string{"gauge", "val"}, float32(1)) - s.EmitKey([]string{"key", "other"}, float32(2)) - s.IncrCounter([]string{"counter", "me"}, float32(3)) - s.AddSample([]string{"sample", "slow thingy"}, float32(4)) - - select { - case <-done: - s.Shutdown() - case <-time.After(3 * time.Second): - t.Fatalf("timeout") - } -} diff --git a/Godeps/_workspace/src/github.com/beorn7/perks/quantile/exampledata.txt b/Godeps/_workspace/src/github.com/beorn7/perks/quantile/exampledata.txt deleted file mode 100644 index 1602287d7..000000000 --- a/Godeps/_workspace/src/github.com/beorn7/perks/quantile/exampledata.txt +++ /dev/null @@ -1,2388 +0,0 @@ -8 -5 -26 -12 -5 -235 -13 -6 -28 -30 -3 -3 -3 -3 -5 -2 -33 -7 -2 -4 -7 -12 -14 -5 -8 -3 -10 -4 -5 -3 -6 -6 -209 -20 -3 -10 -14 -3 -4 -6 -8 -5 -11 -7 -3 -2 -3 -3 -212 -5 -222 -4 -10 -10 -5 -6 -3 -8 -3 -10 -254 -220 -2 -3 -5 -24 -5 -4 -222 -7 -3 -3 -223 -8 -15 -12 -14 -14 -3 -2 -2 -3 -13 -3 -11 -4 -4 -6 -5 -7 -13 -5 -3 -5 -2 -5 -3 -5 -2 -7 -15 -17 -14 -3 -6 -6 -3 -17 -5 -4 -7 -6 -4 -4 -8 -6 -8 -3 -9 -3 -6 -3 -4 -5 -3 -3 -660 -4 -6 -10 -3 -6 -3 -2 -5 -13 -2 -4 -4 -10 -4 -8 -4 -3 -7 -9 -9 -3 -10 -37 -3 -13 -4 -12 -3 -6 -10 -8 -5 -21 -2 -3 -8 -3 -2 -3 -3 -4 -12 -2 -4 -8 -8 -4 -3 -2 -20 -1 -6 -32 -2 -11 -6 -18 -3 -8 
-11 -3 -212 -3 -4 -2 -6 -7 -12 -11 -3 -2 -16 -10 -6 -4 -6 -3 -2 -7 -3 -2 -2 -2 -2 -5 -6 -4 -3 -10 -3 -4 -6 -5 -3 -4 -4 -5 -6 -4 -3 -4 -4 -5 -7 -5 -5 -3 -2 -7 -2 -4 -12 -4 -5 -6 -2 -4 -4 -8 -4 -15 -13 -7 -16 -5 -3 -23 -5 -5 -7 -3 -2 -9 -8 -7 -5 -8 -11 -4 -10 -76 -4 -47 -4 -3 -2 -7 -4 -2 -3 -37 -10 -4 -2 -20 -5 -4 -4 -10 -10 -4 -3 -7 -23 -240 -7 -13 -5 -5 -3 -3 -2 -5 -4 -2 -8 -7 -19 -2 -23 -8 -7 -2 -5 -3 -8 -3 -8 -13 -5 -5 -5 -2 -3 -23 -4 -9 -8 -4 -3 -3 -5 -220 -2 -3 -4 -6 -14 -3 -53 -6 -2 -5 -18 -6 -3 -219 -6 -5 -2 -5 -3 -6 -5 -15 -4 -3 -17 -3 -2 -4 -7 -2 -3 -3 -4 -4 -3 -2 -664 -6 -3 -23 -5 -5 -16 -5 -8 -2 -4 -2 -24 -12 -3 -2 -3 -5 -8 -3 -5 -4 -3 -14 -3 -5 -8 -2 -3 -7 -9 -4 -2 -3 -6 -8 -4 -3 -4 -6 -5 -3 -3 -6 -3 -19 -4 -4 -6 -3 -6 -3 -5 -22 -5 -4 -4 -3 -8 -11 -4 -9 -7 -6 -13 -4 -4 -4 -6 -17 -9 -3 -3 -3 -4 -3 -221 -5 -11 -3 -4 -2 -12 -6 -3 -5 -7 -5 -7 -4 -9 -7 -14 -37 -19 -217 -16 -3 -5 -2 -2 -7 -19 -7 -6 -7 -4 -24 -5 -11 -4 -7 -7 -9 -13 -3 -4 -3 -6 -28 -4 -4 -5 -5 -2 -5 -6 -4 -4 -6 -10 -5 -4 -3 -2 -3 -3 -6 -5 -5 -4 -3 -2 -3 -7 -4 -6 -18 -16 -8 -16 -4 -5 -8 -6 -9 -13 -1545 -6 -215 -6 -5 -6 -3 -45 -31 -5 -2 -2 -4 -3 -3 -2 -5 -4 -3 -5 -7 -7 -4 -5 -8 -5 -4 -749 -2 -31 -9 -11 -2 -11 -5 -4 -4 -7 -9 -11 -4 -5 -4 -7 -3 -4 -6 -2 -15 -3 -4 -3 -4 -3 -5 -2 -13 -5 -5 -3 -3 -23 -4 -4 -5 -7 -4 -13 -2 -4 -3 -4 -2 -6 -2 -7 -3 -5 -5 -3 -29 -5 -4 -4 -3 -10 -2 -3 -79 -16 -6 -6 -7 -7 -3 -5 -5 -7 -4 -3 -7 -9 -5 -6 -5 -9 -6 -3 -6 -4 -17 -2 -10 -9 -3 -6 -2 -3 -21 -22 -5 -11 -4 -2 -17 -2 -224 -2 -14 -3 -4 -4 -2 -4 -4 -4 -4 -5 -3 -4 -4 -10 -2 -6 -3 -3 -5 -7 -2 -7 -5 -6 -3 -218 -2 -2 -5 -2 -6 -3 -5 -222 -14 -6 -33 -3 -2 -5 -3 -3 -3 -9 -5 -3 -3 -2 -7 -4 -3 -4 -3 -5 -6 -5 -26 -4 -13 -9 -7 -3 -221 -3 -3 -4 -4 -4 -4 -2 -18 -5 -3 -7 -9 -6 -8 -3 -10 -3 -11 -9 -5 -4 -17 -5 -5 -6 -6 -3 -2 -4 -12 -17 -6 -7 -218 -4 -2 -4 -10 -3 -5 -15 -3 -9 -4 -3 -3 -6 -29 -3 -3 -4 -5 -5 -3 -8 -5 -6 -6 -7 -5 -3 -5 -3 -29 -2 -31 -5 -15 -24 -16 -5 -207 -4 -3 -3 -2 -15 -4 -4 -13 -5 -5 -4 -6 -10 -2 -7 -8 -4 -6 -20 -5 -3 -4 -3 -12 -12 -5 -17 -7 -3 -3 -3 -6 -10 -3 -5 -25 -80 -4 -9 -3 -2 -11 -3 -3 -2 -3 -8 -7 -5 -5 -19 -5 -3 -3 -12 -11 -2 -6 -5 -5 -5 -3 -3 -3 -4 -209 -14 -3 -2 -5 -19 -4 -4 -3 -4 -14 -5 -6 -4 -13 -9 -7 -4 -7 -10 -2 -9 -5 -7 -2 -8 -4 -6 -5 -5 -222 -8 -7 -12 -5 -216 -3 -4 -4 -6 -3 -14 -8 -7 -13 -4 -3 -3 -3 -3 -17 -5 -4 -3 -33 -6 -6 -33 -7 -5 -3 -8 -7 -5 -2 -9 -4 -2 -233 -24 -7 -4 -8 -10 -3 -4 -15 -2 -16 -3 -3 -13 -12 -7 -5 -4 -207 -4 -2 -4 -27 -15 -2 -5 -2 -25 -6 -5 -5 -6 -13 -6 -18 -6 -4 -12 -225 -10 -7 -5 -2 -2 -11 -4 -14 -21 -8 -10 -3 -5 -4 -232 -2 -5 -5 -3 -7 -17 -11 -6 -6 -23 -4 -6 -3 -5 -4 -2 -17 -3 -6 -5 -8 -3 -2 -2 -14 -9 -4 -4 -2 -5 -5 -3 -7 -6 -12 -6 -10 -3 -6 -2 -2 -19 -5 -4 -4 -9 -2 -4 -13 -3 -5 -6 -3 -6 -5 -4 -9 -6 -3 -5 -7 -3 -6 -6 -4 -3 -10 -6 -3 -221 -3 -5 -3 -6 -4 -8 -5 -3 -6 -4 -4 -2 -54 -5 -6 -11 -3 -3 -4 -4 -4 -3 -7 -3 -11 -11 -7 -10 -6 -13 -223 -213 -15 -231 -7 -3 -7 -228 -2 -3 -4 -4 -5 -6 -7 -4 -13 -3 -4 -5 -3 -6 -4 -6 -7 -2 -4 -3 -4 -3 -3 -6 -3 -7 -3 -5 -18 -5 -6 -8 -10 -3 -3 -3 -2 -4 -2 -4 -4 -5 -6 -6 -4 -10 -13 -3 -12 -5 -12 -16 -8 -4 -19 -11 -2 -4 -5 -6 -8 -5 -6 -4 -18 -10 -4 -2 -216 -6 -6 -6 -2 -4 -12 -8 -3 -11 -5 -6 -14 -5 -3 -13 -4 -5 -4 -5 -3 -28 -6 -3 -7 -219 -3 -9 -7 -3 -10 -6 -3 -4 -19 -5 -7 -11 -6 -15 -19 -4 -13 -11 -3 -7 -5 -10 -2 -8 -11 -2 -6 -4 -6 -24 -6 -3 -3 -3 -3 -6 -18 -4 -11 -4 -2 -5 -10 -8 -3 -9 -5 -3 -4 -5 -6 -2 -5 -7 -4 -4 -14 -6 -4 -4 -5 -5 -7 -2 -4 -3 -7 -3 -3 -6 -4 -5 -4 -4 -4 -3 -3 -3 -3 -8 -14 -2 -3 -5 -3 -2 -4 -5 -3 -7 -3 -3 -18 -3 -4 -4 -5 -7 -3 -3 -3 -13 -5 -4 -8 -211 -5 -5 -3 -5 -2 
-5 -4 -2 -655 -6 -3 -5 -11 -2 -5 -3 -12 -9 -15 -11 -5 -12 -217 -2 -6 -17 -3 -3 -207 -5 -5 -4 -5 -9 -3 -2 -8 -5 -4 -3 -2 -5 -12 -4 -14 -5 -4 -2 -13 -5 -8 -4 -225 -4 -3 -4 -5 -4 -3 -3 -6 -23 -9 -2 -6 -7 -233 -4 -4 -6 -18 -3 -4 -6 -3 -4 -4 -2 -3 -7 -4 -13 -227 -4 -3 -5 -4 -2 -12 -9 -17 -3 -7 -14 -6 -4 -5 -21 -4 -8 -9 -2 -9 -25 -16 -3 -6 -4 -7 -8 -5 -2 -3 -5 -4 -3 -3 -5 -3 -3 -3 -2 -3 -19 -2 -4 -3 -4 -2 -3 -4 -4 -2 -4 -3 -3 -3 -2 -6 -3 -17 -5 -6 -4 -3 -13 -5 -3 -3 -3 -4 -9 -4 -2 -14 -12 -4 -5 -24 -4 -3 -37 -12 -11 -21 -3 -4 -3 -13 -4 -2 -3 -15 -4 -11 -4 -4 -3 -8 -3 -4 -4 -12 -8 -5 -3 -3 -4 -2 -220 -3 -5 -223 -3 -3 -3 -10 -3 -15 -4 -241 -9 -7 -3 -6 -6 -23 -4 -13 -7 -3 -4 -7 -4 -9 -3 -3 -4 -10 -5 -5 -1 -5 -24 -2 -4 -5 -5 -6 -14 -3 -8 -2 -3 -5 -13 -13 -3 -5 -2 -3 -15 -3 -4 -2 -10 -4 -4 -4 -5 -5 -3 -5 -3 -4 -7 -4 -27 -3 -6 -4 -15 -3 -5 -6 -6 -5 -4 -8 -3 -9 -2 -6 -3 -4 -3 -7 -4 -18 -3 -11 -3 -3 -8 -9 -7 -24 -3 -219 -7 -10 -4 -5 -9 -12 -2 -5 -4 -4 -4 -3 -3 -19 -5 -8 -16 -8 -6 -22 -3 -23 -3 -242 -9 -4 -3 -3 -5 -7 -3 -3 -5 -8 -3 -7 -5 -14 -8 -10 -3 -4 -3 -7 -4 -6 -7 -4 -10 -4 -3 -11 -3 -7 -10 -3 -13 -6 -8 -12 -10 -5 -7 -9 -3 -4 -7 -7 -10 -8 -30 -9 -19 -4 -3 -19 -15 -4 -13 -3 -215 -223 -4 -7 -4 -8 -17 -16 -3 -7 -6 -5 -5 -4 -12 -3 -7 -4 -4 -13 -4 -5 -2 -5 -6 -5 -6 -6 -7 -10 -18 -23 -9 -3 -3 -6 -5 -2 -4 -2 -7 -3 -3 -2 -5 -5 -14 -10 -224 -6 -3 -4 -3 -7 -5 -9 -3 -6 -4 -2 -5 -11 -4 -3 -3 -2 -8 -4 -7 -4 -10 -7 -3 -3 -18 -18 -17 -3 -3 -3 -4 -5 -3 -3 -4 -12 -7 -3 -11 -13 -5 -4 -7 -13 -5 -4 -11 -3 -12 -3 -6 -4 -4 -21 -4 -6 -9 -5 -3 -10 -8 -4 -6 -4 -4 -6 -5 -4 -8 -6 -4 -6 -4 -4 -5 -9 -6 -3 -4 -2 -9 -3 -18 -2 -4 -3 -13 -3 -6 -6 -8 -7 -9 -3 -2 -16 -3 -4 -6 -3 -2 -33 -22 -14 -4 -9 -12 -4 -5 -6 -3 -23 -9 -4 -3 -5 -5 -3 -4 -5 -3 -5 -3 -10 -4 -5 -5 -8 -4 -4 -6 -8 -5 -4 -3 -4 -6 -3 -3 -3 -5 -9 -12 -6 -5 -9 -3 -5 -3 -2 -2 -2 -18 -3 -2 -21 -2 -5 -4 -6 -4 -5 -10 -3 -9 -3 -2 -10 -7 -3 -6 -6 -4 -4 -8 -12 -7 -3 -7 -3 -3 -9 -3 -4 -5 -4 -4 -5 -5 -10 -15 -4 -4 -14 -6 -227 -3 -14 -5 -216 -22 -5 -4 -2 -2 -6 -3 -4 -2 -9 -9 -4 -3 -28 -13 -11 -4 -5 -3 -3 -2 -3 -3 -5 -3 -4 -3 -5 -23 -26 -3 -4 -5 -6 -4 -6 -3 -5 -5 -3 -4 -3 -2 -2 -2 -7 -14 -3 -6 -7 -17 -2 -2 -15 -14 -16 -4 -6 -7 -13 -6 -4 -5 -6 -16 -3 -3 -28 -3 -6 -15 -3 -9 -2 -4 -6 -3 -3 -22 -4 -12 -6 -7 -2 -5 -4 -10 -3 -16 -6 -9 -2 -5 -12 -7 -5 -5 -5 -5 -2 -11 -9 -17 -4 -3 -11 -7 -3 -5 -15 -4 -3 -4 -211 -8 -7 -5 -4 -7 -6 -7 -6 -3 -6 -5 -6 -5 -3 -4 -4 -26 -4 -6 -10 -4 -4 -3 -2 -3 -3 -4 -5 -9 -3 -9 -4 -4 -5 -5 -8 -2 -4 -2 -3 -8 -4 -11 -19 -5 -8 -6 -3 -5 -6 -12 -3 -2 -4 -16 -12 -3 -4 -4 -8 -6 -5 -6 -6 -219 -8 -222 -6 -16 -3 -13 -19 -5 -4 -3 -11 -6 -10 -4 -7 -7 -12 -5 -3 -3 -5 -6 -10 -3 -8 -2 -5 -4 -7 -2 -4 -4 -2 -12 -9 -6 -4 -2 -40 -2 -4 -10 -4 -223 -4 -2 -20 -6 -7 -24 -5 -4 -5 -2 -20 -16 -6 -5 -13 -2 -3 -3 -19 -3 -2 -4 -5 -6 -7 -11 -12 -5 -6 -7 -7 -3 -5 -3 -5 -3 -14 -3 -4 -4 -2 -11 -1 -7 -3 -9 -6 -11 -12 -5 -8 -6 -221 -4 -2 -12 -4 -3 -15 -4 -5 -226 -7 -218 -7 -5 -4 -5 -18 -4 -5 -9 -4 -4 -2 -9 -18 -18 -9 -5 -6 -6 -3 -3 -7 -3 -5 -4 -4 -4 -12 -3 -6 -31 -5 -4 -7 -3 -6 -5 -6 -5 -11 -2 -2 -11 -11 -6 -7 -5 -8 -7 -10 -5 -23 -7 -4 -3 -5 -34 -2 -5 -23 -7 -3 -6 -8 -4 -4 -4 -2 -5 -3 -8 -5 -4 -8 -25 -2 -3 -17 -8 -3 -4 -8 -7 -3 -15 -6 -5 -7 -21 -9 -5 -6 -6 -5 -3 -2 -3 -10 -3 -6 -3 -14 -7 -4 -4 -8 -7 -8 -2 -6 -12 -4 -213 -6 -5 -21 -8 -2 -5 -23 -3 -11 -2 -3 -6 -25 -2 -3 -6 -7 -6 -6 -4 -4 -6 -3 -17 -9 -7 -6 -4 -3 -10 -7 -2 -3 -3 -3 -11 -8 -3 -7 -6 -4 -14 -36 -3 -4 -3 -3 -22 -13 -21 -4 -2 -7 -4 -4 -17 -15 -3 -7 -11 -2 -4 -7 -6 -209 -6 -3 -2 -2 -24 -4 -9 -4 -3 -3 -3 -29 -2 -2 -4 -3 -3 -5 -4 -6 -3 -3 -2 
-4 diff --git a/Godeps/_workspace/src/github.com/beorn7/perks/quantile/stream.go b/Godeps/_workspace/src/github.com/beorn7/perks/quantile/stream.go deleted file mode 100644 index 587b1fc5b..000000000 --- a/Godeps/_workspace/src/github.com/beorn7/perks/quantile/stream.go +++ /dev/null @@ -1,292 +0,0 @@ -// Package quantile computes approximate quantiles over an unbounded data -// stream within low memory and CPU bounds. -// -// A small amount of accuracy is traded to achieve the above properties. -// -// Multiple streams can be merged before calling Query to generate a single set -// of results. This is meaningful when the streams represent the same type of -// data. See Merge and Samples. -// -// For more detailed information about the algorithm used, see: -// -// Effective Computation of Biased Quantiles over Data Streams -// -// http://www.cs.rutgers.edu/~muthu/bquant.pdf -package quantile - -import ( - "math" - "sort" -) - -// Sample holds an observed value and meta information for compression. JSON -// tags have been added for convenience. -type Sample struct { - Value float64 `json:",string"` - Width float64 `json:",string"` - Delta float64 `json:",string"` -} - -// Samples represents a slice of samples. It implements sort.Interface. -type Samples []Sample - -func (a Samples) Len() int { return len(a) } -func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } -func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type invariant func(s *stream, r float64) float64 - -// NewLowBiased returns an initialized Stream for low-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the lower ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewLowBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * r - } - return newStream(ƒ) -} - -// NewHighBiased returns an initialized Stream for high-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the higher ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewHighBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * (s.n - r) - } - return newStream(ƒ) -} - -// NewTargeted returns an initialized Stream concerned with a particular set of -// quantile values that are supplied a priori. Knowing these a priori reduces -// space and computation time. The targets map maps the desired quantiles to -// their absolute errors, i.e. the true quantile of a value returned by a query -// is guaranteed to be within (Quantile±Epsilon). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. 
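// A minimal usage sketch of NewTargeted (the target quantiles, epsilons, and
// the observations slice below are illustrative, not prescribed by the
// package):
//
//	s := NewTargeted(map[float64]float64{0.5: 0.005, 0.99: 0.001})
//	for _, v := range observations {
//		s.Insert(v)
//	}
//	p99 := s.Query(0.99)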
-func NewTargeted(targets map[float64]float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - var m = math.MaxFloat64 - var f float64 - for quantile, epsilon := range targets { - if quantile*s.n <= r { - f = (2 * epsilon * r) / quantile - } else { - f = (2 * epsilon * (s.n - r)) / (1 - quantile) - } - if f < m { - m = f - } - } - return m - } - return newStream(ƒ) -} - -// Stream computes quantiles for a stream of float64s. It is not thread-safe by -// design. Take care when using across multiple goroutines. -type Stream struct { - *stream - b Samples - sorted bool -} - -func newStream(ƒ invariant) *Stream { - x := &stream{ƒ: ƒ} - return &Stream{x, make(Samples, 0, 500), true} -} - -// Insert inserts v into the stream. -func (s *Stream) Insert(v float64) { - s.insert(Sample{Value: v, Width: 1}) -} - -func (s *Stream) insert(sample Sample) { - s.b = append(s.b, sample) - s.sorted = false - if len(s.b) == cap(s.b) { - s.flush() - } -} - -// Query returns the computed qth percentiles value. If s was created with -// NewTargeted, and q is not in the set of quantiles provided a priori, Query -// will return an unspecified result. -func (s *Stream) Query(q float64) float64 { - if !s.flushed() { - // Fast path when there hasn't been enough data for a flush; - // this also yields better accuracy for small sets of data. - l := len(s.b) - if l == 0 { - return 0 - } - i := int(float64(l) * q) - if i > 0 { - i -= 1 - } - s.maybeSort() - return s.b[i].Value - } - s.flush() - return s.stream.query(q) -} - -// Merge merges samples into the underlying streams samples. This is handy when -// merging multiple streams from separate threads, database shards, etc. -// -// ATTENTION: This method is broken and does not yield correct results. The -// underlying algorithm is not capable of merging streams correctly. -func (s *Stream) Merge(samples Samples) { - sort.Sort(samples) - s.stream.merge(samples) -} - -// Reset reinitializes and clears the list reusing the samples buffer memory. -func (s *Stream) Reset() { - s.stream.reset() - s.b = s.b[:0] -} - -// Samples returns stream samples held by s. -func (s *Stream) Samples() Samples { - if !s.flushed() { - return s.b - } - s.flush() - return s.stream.samples() -} - -// Count returns the total number of samples observed in the stream -// since initialization. -func (s *Stream) Count() int { - return len(s.b) + s.stream.count() -} - -func (s *Stream) flush() { - s.maybeSort() - s.stream.merge(s.b) - s.b = s.b[:0] -} - -func (s *Stream) maybeSort() { - if !s.sorted { - s.sorted = true - sort.Sort(s.b) - } -} - -func (s *Stream) flushed() bool { - return len(s.stream.l) > 0 -} - -type stream struct { - n float64 - l []Sample - ƒ invariant -} - -func (s *stream) reset() { - s.l = s.l[:0] - s.n = 0 -} - -func (s *stream) insert(v float64) { - s.merge(Samples{{v, 1, 0}}) -} - -func (s *stream) merge(samples Samples) { - // TODO(beorn7): This tries to merge not only individual samples, but - // whole summaries. The paper doesn't mention merging summaries at - // all. Unittests show that the merging is inaccurate. Find out how to - // do merges properly. - var r float64 - i := 0 - for _, sample := range samples { - for ; i < len(s.l); i++ { - c := s.l[i] - if c.Value > sample.Value { - // Insert at position i. - s.l = append(s.l, Sample{}) - copy(s.l[i+1:], s.l[i:]) - s.l[i] = Sample{ - sample.Value, - sample.Width, - math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), - // TODO(beorn7): How to calculate delta correctly? 
- } - i++ - goto inserted - } - r += c.Width - } - s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) - i++ - inserted: - s.n += sample.Width - r += sample.Width - } - s.compress() -} - -func (s *stream) count() int { - return int(s.n) -} - -func (s *stream) query(q float64) float64 { - t := math.Ceil(q * s.n) - t += math.Ceil(s.ƒ(s, t) / 2) - p := s.l[0] - var r float64 - for _, c := range s.l[1:] { - r += p.Width - if r+c.Width+c.Delta > t { - return p.Value - } - p = c - } - return p.Value -} - -func (s *stream) compress() { - if len(s.l) < 2 { - return - } - x := s.l[len(s.l)-1] - xi := len(s.l) - 1 - r := s.n - 1 - x.Width - - for i := len(s.l) - 2; i >= 0; i-- { - c := s.l[i] - if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { - x.Width += c.Width - s.l[xi] = x - // Remove element at i. - copy(s.l[i:], s.l[i+1:]) - s.l = s.l[:len(s.l)-1] - xi -= 1 - } else { - x = c - xi = i - } - r -= c.Width - } -} - -func (s *stream) samples() Samples { - samples := make(Samples, len(s.l)) - copy(samples, s.l) - return samples -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/.gitignore b/Godeps/_workspace/src/github.com/boltdb/bolt/.gitignore deleted file mode 100644 index c7bd2b7a5..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.prof -*.test -*.swp -/bin/ diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/LICENSE b/Godeps/_workspace/src/github.com/boltdb/bolt/LICENSE deleted file mode 100644 index 004e77fe5..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Ben Johnson - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile b/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile deleted file mode 100644 index cfbed514b..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile +++ /dev/null @@ -1,54 +0,0 @@ -TEST=. -BENCH=. -COVERPROFILE=/tmp/c.out -BRANCH=`git rev-parse --abbrev-ref HEAD` -COMMIT=`git rev-parse --short HEAD` -GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" - -default: build - -bench: - go test -v -test.run=NOTHINCONTAINSTHIS -test.bench=$(BENCH) - -# http://cloc.sourceforge.net/ -cloc: - @cloc --not-match-f='Makefile|_test.go' . - -cover: fmt - go test -coverprofile=$(COVERPROFILE) -test.run=$(TEST) $(COVERFLAG) . 
- go tool cover -html=$(COVERPROFILE) - rm $(COVERPROFILE) - -cpuprofile: fmt - @go test -c - @./bolt.test -test.v -test.run=$(TEST) -test.cpuprofile cpu.prof - -# go get github.com/kisielk/errcheck -errcheck: - @echo "=== errcheck ===" - @errcheck github.com/boltdb/bolt - -fmt: - @go fmt ./... - -get: - @go get -d ./... - -build: get - @mkdir -p bin - @go build -ldflags=$(GOLDFLAGS) -a -o bin/bolt ./cmd/bolt - -test: fmt - @go get github.com/stretchr/testify/assert - @echo "=== TESTS ===" - @go test -v -cover -test.run=$(TEST) - @echo "" - @echo "" - @echo "=== CLI ===" - @go test -v -test.run=$(TEST) ./cmd/bolt - @echo "" - @echo "" - @echo "=== RACE DETECTOR ===" - @go test -v -race -test.run="TestSimulate_(100op|1000op)" - -.PHONY: bench cloc cover cpuprofile fmt memprofile test diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/README.md b/Godeps/_workspace/src/github.com/boltdb/bolt/README.md deleted file mode 100644 index 00fad6afb..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/README.md +++ /dev/null @@ -1,621 +0,0 @@ -Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.png?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.png)](https://godoc.org/github.com/boltdb/bolt) ![Version](http://img.shields.io/badge/version-1.0-green.png) -==== - -Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] and -the [LMDB project][lmdb]. The goal of the project is to provide a simple, -fast, and reliable database for projects that don't require a full database -server such as Postgres or MySQL. - -Since Bolt is meant to be used as such a low-level piece of functionality, -simplicity is key. The API will be small and only focus on getting values -and setting values. That's it. - -[hyc_symas]: https://twitter.com/hyc_symas -[lmdb]: http://symas.com/mdb/ - - -## Project Status - -Bolt is stable and the API is fixed. Full unit test coverage and randomized -black box testing are used to ensure database consistency and thread safety. -Bolt is currently in high-load production environments serving databases as -large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed -services every day. - - -## Getting Started - -### Installing - -To start using Bolt, install Go and run `go get`: - -```sh -$ go get github.com/boltdb/bolt/... -``` - -This will retrieve the library and install the `bolt` command line utility into -your `$GOBIN` path. - - -### Opening a database - -The top-level object in Bolt is a `DB`. It is represented as a single file on -your disk and represents a consistent snapshot of your data. - -To open your database, simply use the `bolt.Open()` function: - -```go -package main - -import ( - "log" - - "github.com/boltdb/bolt" -) - -func main() { - // Open the my.db data file in your current directory. - // It will be created if it doesn't exist. - db, err := bolt.Open("my.db", 0600, nil) - if err != nil { - log.Fatal(err) - } - defer db.Close() - - ... -} -``` - -Please note that Bolt obtains a file lock on the data file so multiple processes -cannot open the same database at the same time. Opening an already open Bolt -database will cause it to hang until the other process closes it. 
To prevent -an indefinite wait you can pass a timeout option to the `Open()` function: - -```go -db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) -``` - - -### Transactions - -Bolt allows only one read-write transaction at a time but allows as many -read-only transactions as you want at a time. Each transaction has a consistent -view of the data as it existed when the transaction started. - -Individual transactions and all objects created from them (e.g. buckets, keys) -are not thread safe. To work with data in multiple goroutines you must start -a transaction for each one or use locking to ensure only one goroutine accesses -a transaction at a time. Creating a transaction from the `DB` is thread safe. - -Read-only transactions and read-write transactions should not depend on one -another and generally shouldn't be opened simultaneously in the same goroutine. -This can cause a deadlock as the read-write transaction needs to periodically -re-map the data file but it cannot do so while a read-only transaction is open. - - -#### Read-write transactions - -To start a read-write transaction, you can use the `DB.Update()` function: - -```go -err := db.Update(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -Inside the closure, you have a consistent view of the database. You commit the -transaction by returning `nil` at the end. You can also roll back the transaction -at any point by returning an error. All database operations are allowed inside -a read-write transaction. - -Always check the return error as it will report any disk failures that can cause -your transaction to not complete. If you return an error within your closure -it will be passed through. - - -#### Read-only transactions - -To start a read-only transaction, you can use the `DB.View()` function: - -```go -err := db.View(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -You also get a consistent view of the database within this closure, however, -no mutating operations are allowed within a read-only transaction. You can only -retrieve buckets, retrieve values, and copy the database within a read-only -transaction. - - -#### Batch read-write transactions - -Each `DB.Update()` waits for disk to commit the writes. This overhead -can be minimized by combining multiple updates with the `DB.Batch()` -function: - -```go -err := db.Batch(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -Concurrent Batch calls are opportunistically combined into larger -transactions. Batch is only useful when there are multiple goroutines -calling it. - -The trade-off is that `Batch` can call the given -function multiple times if parts of the transaction fail. The -function must be idempotent and side effects must take effect only -after a successful return from `DB.Batch()`. - -For example: don't display messages from inside the function, instead -set variables in the enclosing scope: - -```go -var id uint64 -err := db.Batch(func(tx *bolt.Tx) error { - // Find last key in bucket, decode as bigendian uint64, increment - // by one, encode back to []byte, and add new key. - ... - id = newValue - return nil -}) -if err != nil { - return ... -} -fmt.Printf("Allocated ID %d\n", id) -``` - - -#### Managing transactions manually - -The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()` -function. These helper functions will start the transaction, execute a function, -and then safely close your transaction if an error is returned. This is the -recommended way to use Bolt transactions.
- -However, sometimes you may want to manually start and end your transactions. -You can use the `DB.Begin()` function directly but _please_ be sure to close the -transaction. - -```go -// Start a writable transaction. -tx, err := db.Begin(true) -if err != nil { - return err -} -defer tx.Rollback() - -// Use the transaction... -_, err = tx.CreateBucket([]byte("MyBucket")) -if err != nil { - return err -} - -// Commit the transaction and check for error. -if err := tx.Commit(); err != nil { - return err -} -``` - -The first argument to `DB.Begin()` is a boolean stating if the transaction -should be writable. - - -### Using buckets - -Buckets are collections of key/value pairs within the database. All keys in a -bucket must be unique. You can create a bucket using the `Tx.CreateBucket()` -function: - -```go -db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("MyBucket")) - if err != nil { - return fmt.Errorf("create bucket: %s", err) - } - return nil -}) -``` - -You can also create a bucket only if it doesn't exist by using the -`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this -function for all your top-level buckets after you open your database so you can -guarantee that they exist for future transactions. - -To delete a bucket, simply call the `Tx.DeleteBucket()` function. - - -### Using key/value pairs - -To save a key/value pair to a bucket, use the `Bucket.Put()` function: - -```go -db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - err := b.Put([]byte("answer"), []byte("42")) - return err -}) -``` - -This will set the value of the `"answer"` key to `"42"` in the `MyBucket` -bucket. To retrieve this value, we can use the `Bucket.Get()` function: - -```go -db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - v := b.Get([]byte("answer")) - fmt.Printf("The answer is: %s\n", v) - return nil -}) -``` - -The `Get()` function does not return an error because its operation is -guaranteed to work (unless there is some kind of system failure). If the key -exists then it will return its byte slice value. If it doesn't exist then it -will return `nil`. It's important to note that you can have a zero-length value -set to a key which is different from the key not existing. - -Use the `Bucket.Delete()` function to delete a key from the bucket. - -Please note that values returned from `Get()` are only valid while the -transaction is open. If you need to use a value outside of the transaction -then you must use `copy()` to copy it to another byte slice. - - -### Iterating over keys - -Bolt stores its keys in byte-sorted order within a bucket. This makes sequential -iteration over these keys extremely fast. To iterate over keys we'll use a -`Cursor`: - -```go -db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - c := b.Cursor() - - for k, v := c.First(); k != nil; k, v = c.Next() { - fmt.Printf("key=%s, value=%s\n", k, v) - } - - return nil -}) -``` - -The cursor allows you to move to a specific point in the list of keys and move -forward or backward through the keys one at a time. - -The following functions are available on the cursor: - -``` -First() Move to the first key. -Last() Move to the last key. -Seek() Move to a specific key. -Next() Move to the next key. -Prev() Move to the previous key. -``` - -When you have iterated to the end of the cursor then `Next()` will return `nil`. -You must seek to a position using `First()`, `Last()`, or `Seek()` before -calling `Next()` or `Prev()`.
If you do not seek to a position then these -functions will return `nil`. - - -#### Prefix scans - -To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: - -```go -db.View(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("MyBucket")).Cursor() - - prefix := []byte("1234") - for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() { - fmt.Printf("key=%s, value=%s\n", k, v) - } - - return nil -}) -``` - -#### Range scans - -Another common use case is scanning over a range such as a time range. If you -use a sortable time encoding such as RFC3339 then you can query a specific -date range like this: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume our events bucket has RFC3339 encoded time keys. - c := tx.Bucket([]byte("Events")).Cursor() - - // Our time range spans the 90's decade. - min := []byte("1990-01-01T00:00:00Z") - max := []byte("2000-01-01T00:00:00Z") - - // Iterate over the 90's. - for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { - fmt.Printf("%s: %s\n", k, v) - } - - return nil -}) -``` - - -#### ForEach() - -You can also use the function `ForEach()` if you know you'll be iterating over -all the keys in a bucket: - -```go -db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - b.ForEach(func(k, v []byte) error { - fmt.Printf("key=%s, value=%s\n", k, v) - return nil - }) - return nil -}) -``` - - -### Nested buckets - -You can also store a bucket in a key to create nested buckets. The API is the -same as the bucket management API on the `DB` object: - -```go -func (*Bucket) CreateBucket(key []byte) (*Bucket, error) -func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) -func (*Bucket) DeleteBucket(key []byte) error -``` - - -### Database backups - -Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()` -function to write a consistent view of the database to a writer. If you call -this from a read-only transaction, it will perform a hot backup and not block -your other database reads and writes. It will also use `O_DIRECT` when available -to prevent page cache trashing. - -One common use case is to backup over HTTP so you can use tools like `cURL` to -do database backups: - -```go -func BackupHandleFunc(w http.ResponseWriter, req *http.Request) { - err := db.View(func(tx *bolt.Tx) error { - w.Header().Set("Content-Type", "application/octet-stream") - w.Header().Set("Content-Disposition", `attachment; filename="my.db"`) - w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size()))) - _, err := tx.WriteTo(w) - return err - }) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } -} -``` - -Then you can backup using this command: - -```sh -$ curl http://localhost/backup > my.db -``` - -Or you can open your browser to `http://localhost/backup` and it will download -automatically. - -If you want to backup to another file you can use the `Tx.CopyFile()` helper -function. - - -### Statistics - -The database keeps a running count of many of the internal operations it -performs so you can better understand what's going on. By grabbing a snapshot -of these stats at two points in time we can see what operations were performed -in that time range. - -For example, we could start a goroutine to log stats every 10 seconds: - -```go -go func() { - // Grab the initial stats. - prev := db.Stats() - - for { - // Wait for 10s. - time.Sleep(10 * time.Second) - - // Grab the current stats and diff them. 
- stats := db.Stats() - diff := stats.Sub(&prev) - - // Encode stats to JSON and print to STDERR. - json.NewEncoder(os.Stderr).Encode(diff) - - // Save stats for the next loop. - prev = stats - } -}() -``` - -It's also useful to pipe these stats to a service such as statsd for monitoring -or to provide an HTTP endpoint that will perform a fixed-length sample. - - -### Read-Only Mode - -Sometimes it is useful to create a shared, read-only Bolt database. To do this, -set the `Options.ReadOnly` flag when opening your database. Read-only mode -uses a shared lock to allow multiple processes to read from the database but -it will block any processes from opening the database in read-write mode. - -```go -db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) -if err != nil { - log.Fatal(err) -} -``` - - -## Resources - -For more information on getting started with Bolt, check out the following articles: - -* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch). -* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville - - -## Comparison with other databases - -### Postgres, MySQL, & other relational databases - -Relational databases structure data into rows and are only accessible through -the use of SQL. This approach provides flexibility in how you store and query -your data but also incurs overhead in parsing and planning SQL statements. Bolt -accesses all data by a byte slice key. This makes Bolt fast to read and write -data by key but provides no built-in support for joining values together. - -Most relational databases (with the exception of SQLite) are standalone servers -that run separately from your application. This gives your systems -flexibility to connect multiple application servers to a single database -server but also adds overhead in serializing and transporting data over the -network. Bolt runs as a library included in your application so all data access -has to go through your application's process. This brings data closer to your -application but limits multi-process access to the data. - - -### LevelDB, RocksDB - -LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that -they are libraries bundled into the application; however, their underlying -structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes -random writes by using a write-ahead log and multi-tiered, sorted files called -SSTables. Bolt uses a B+tree internally and only a single file. Both approaches -have trade-offs. - -If you require a high random write throughput (>10,000 w/sec) or you need to use -spinning disks then LevelDB could be a good choice. If your application is -read-heavy or does a lot of range scans then Bolt could be a good choice. - -One other important consideration is that LevelDB does not have transactions. -It supports batch writing of key/value pairs and it supports read snapshots -but it will not give you the ability to do a compare-and-swap operation safely. -Bolt supports fully serializable ACID transactions. - - -### LMDB - -Bolt was originally a port of LMDB so it is architecturally similar. Both use -a B+tree, have ACID semantics with fully serializable transactions, and support -lock-free MVCC using a single writer and multiple readers. - -The two projects have somewhat diverged.
LMDB heavily focuses on raw performance -while Bolt has focused on simplicity and ease of use. For example, LMDB allows -several unsafe actions such as direct writes for the sake of performance. Bolt -opts to disallow actions which can leave the database in a corrupted state. The -only exception to this in Bolt is `DB.NoSync`. - -There are also a few differences in API. LMDB requires a maximum mmap size when -opening an `mdb_env` whereas Bolt will handle incremental mmap resizing -automatically. LMDB overloads the getter and setter functions with multiple -flags whereas Bolt splits these specialized cases into their own functions. - - -## Caveats & Limitations - -It's important to pick the right tool for the job and Bolt is no exception. -Here are a few things to note when evaluating and using Bolt: - -* Bolt is good for read intensive workloads. Sequential write performance is - also fast but random writes can be slow. You can add a write-ahead log or - [transaction coalescer](https://github.com/boltdb/coalescer) in front of Bolt - to mitigate this issue. - -* Bolt uses a B+tree internally so there can be a lot of random page access. - SSDs provide a significant performance boost over spinning disks. - -* Try to avoid long running read transactions. Bolt uses copy-on-write so - old pages cannot be reclaimed while an old transaction is using them. - -* Byte slices returned from Bolt are only valid during a transaction. Once the - transaction has been committed or rolled back then the memory they point to - can be reused by a new page or can be unmapped from virtual memory and you'll - see an `unexpected fault address` panic when accessing it. - -* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for - buckets that have random inserts will cause your database to have very poor - page utilization. - -* Use larger buckets in general. Smaller buckets causes poor page utilization - once they become larger than the page size (typically 4KB). - -* Bulk loading a lot of random writes into a new bucket can be slow as the - page will not split until the transaction is committed. Randomly inserting - more than 100,000 key/value pairs into a single new bucket in a single - transaction is not advised. - -* Bolt uses a memory-mapped file so the underlying operating system handles the - caching of the data. Typically, the OS will cache as much of the file as it - can in memory and will release memory as needed to other processes. This means - that Bolt can show very high memory usage when working with large databases. - However, this is expected and the OS will release memory as needed. Bolt can - handle databases much larger than the available physical RAM. - -* The data structures in the Bolt database are memory mapped so the data file - will be endian specific. This means that you cannot copy a Bolt file from a - little endian machine to a big endian machine and have it work. For most - users this is not a concern since most modern CPUs are little endian. - -* Because of the way pages are laid out on disk, Bolt cannot truncate data files - and return free pages back to the disk. Instead, Bolt maintains a free list - of unused pages within its data file. These free pages can be reused by later - transactions. This works well for many use cases as databases generally tend - to grow. However, it's important to note that deleting large chunks of data - will not allow you to reclaim that space on disk. - - For more information on page allocation, [see this comment][page-allocation]. 
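To make the byte-slice caveat above concrete, here is a minimal sketch of the `copy()` pattern; it assumes the `db`, `MyBucket` bucket, and `answer` key from the earlier examples:

```go
var answer []byte
err := db.View(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))
	if b == nil {
		return nil // bucket does not exist yet
	}
	if v := b.Get([]byte("answer")); v != nil {
		// Copy while the transaction is still open; v's backing
		// memory may be reused or unmapped after View returns.
		answer = make([]byte, len(v))
		copy(answer, v)
	}
	return nil
})
```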
- -[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 - - -## Other Projects Using Bolt - -Below is a list of public, open source projects that use Bolt: - -* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard. -* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside. -* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. -* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. -* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. -* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. -* [ChainStore](https://github.com/nulayer/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. -* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. -* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". -* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. -* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. -* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. -* [photosite/session](http://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. -* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. -* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. -* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. -* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. -* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. -* [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database. -* [Seaweed File System](https://github.com/chrislusf/weed-fs) - Highly scalable distributed key~file system with O(1) disk read. -* [InfluxDB](http://influxdb.com) - Scalable datastore for metrics, events, and real-time analytics. -* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. -* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. -* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. -* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistant, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. -* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. - -If you are using Bolt in a project please send a pull request to add it to the list. 
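The `batch.go` file removed next implements the `DB.Batch()` coalescing described in the README above. As a minimal, self-contained sketch of how concurrent callers typically drive it (the database file name, bucket name, and key scheme are illustrative):

```go
package main

import (
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("batch.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Many goroutines writing at once: Batch opportunistically
	// combines these calls into a small number of transactions.
	done := make(chan error, 10)
	for i := 0; i < 10; i++ {
		go func(i byte) {
			done <- db.Batch(func(tx *bolt.Tx) error {
				b, err := tx.CreateBucketIfNotExists([]byte("jobs"))
				if err != nil {
					return err
				}
				// Idempotent put: safe even if Batch re-runs
				// the function after a partial failure.
				return b.Put([]byte{i}, []byte("done"))
			})
		}(byte(i))
	}
	for i := 0; i < 10; i++ {
		if err := <-done; err != nil {
			log.Fatal(err)
		}
	}
}
```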
diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/batch.go b/Godeps/_workspace/src/github.com/boltdb/bolt/batch.go deleted file mode 100644 index 84acae6bb..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/batch.go +++ /dev/null @@ -1,138 +0,0 @@ -package bolt - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// Batch calls fn as part of a batch. It behaves similar to Update, -// except: -// -// 1. concurrent Batch calls can be combined into a single Bolt -// transaction. -// -// 2. the function passed to Batch may be called multiple times, -// regardless of whether it returns error or not. -// -// This means that Batch function side effects must be idempotent and -// take permanent effect only after a successful return is seen in -// caller. -// -// The maximum batch size and delay can be adjusted with DB.MaxBatchSize -// and DB.MaxBatchDelay, respectively. -// -// Batch is only useful when there are multiple goroutines calling it. -func (db *DB) Batch(fn func(*Tx) error) error { - errCh := make(chan error, 1) - - db.batchMu.Lock() - if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { - // There is no existing batch, or the existing batch is full; start a new one. - db.batch = &batch{ - db: db, - } - db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) - } - db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) - if len(db.batch.calls) >= db.MaxBatchSize { - // wake up batch, it's ready to run - go db.batch.trigger() - } - db.batchMu.Unlock() - - err := <-errCh - if err == trySolo { - err = db.Update(fn) - } - return err -} - -type call struct { - fn func(*Tx) error - err chan<- error -} - -type batch struct { - db *DB - timer *time.Timer - start sync.Once - calls []call -} - -// trigger runs the batch if it hasn't already been run. -func (b *batch) trigger() { - b.start.Do(b.run) -} - -// run performs the transactions in the batch and communicates results -// back to DB.Batch. -func (b *batch) run() { - b.db.batchMu.Lock() - b.timer.Stop() - // Make sure no new work is added to this batch, but don't break - // other batches. - if b.db.batch == b { - b.db.batch = nil - } - b.db.batchMu.Unlock() - -retry: - for len(b.calls) > 0 { - var failIdx = -1 - err := b.db.Update(func(tx *Tx) error { - for i, c := range b.calls { - if err := safelyCall(c.fn, tx); err != nil { - failIdx = i - return err - } - } - return nil - }) - - if failIdx >= 0 { - // take the failing transaction out of the batch. it's - // safe to shorten b.calls here because db.batch no longer - // points to us, and we hold the mutex anyway. - c := b.calls[failIdx] - b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] - // tell the submitter re-run it solo, continue with the rest of the batch - c.err <- trySolo - continue retry - } - - // pass success, or bolt internal errors, to all callers - for _, c := range b.calls { - if c.err != nil { - c.err <- err - } - } - break retry - } -} - -// trySolo is a special sentinel error value used for signaling that a -// transaction function should be re-run. It should never be seen by -// callers. 
-var trySolo = errors.New("batch function returned an error and should be re-run solo") - -type panicked struct { - reason interface{} -} - -func (p panicked) Error() string { - if err, ok := p.reason.(error); ok { - return err.Error() - } - return fmt.Sprintf("panic: %v", p.reason) -} - -func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { - defer func() { - if p := recover(); p != nil { - err = panicked{p} - } - }() - return fn(tx) -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/batch_benchmark_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/batch_benchmark_test.go deleted file mode 100644 index b745a371f..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/batch_benchmark_test.go +++ /dev/null @@ -1,170 +0,0 @@ -package bolt_test - -import ( - "bytes" - "encoding/binary" - "errors" - "hash/fnv" - "sync" - "testing" - - "github.com/boltdb/bolt" -) - -func validateBatchBench(b *testing.B, db *TestDB) { - var rollback = errors.New("sentinel error to cause rollback") - validate := func(tx *bolt.Tx) error { - bucket := tx.Bucket([]byte("bench")) - h := fnv.New32a() - buf := make([]byte, 4) - for id := uint32(0); id < 1000; id++ { - binary.LittleEndian.PutUint32(buf, id) - h.Reset() - h.Write(buf[:]) - k := h.Sum(nil) - v := bucket.Get(k) - if v == nil { - b.Errorf("not found id=%d key=%x", id, k) - continue - } - if g, e := v, []byte("filler"); !bytes.Equal(g, e) { - b.Errorf("bad value for id=%d key=%x: %s != %q", id, k, g, e) - } - if err := bucket.Delete(k); err != nil { - return err - } - } - // should be empty now - c := bucket.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - b.Errorf("unexpected key: %x = %q", k, v) - } - return rollback - } - if err := db.Update(validate); err != nil && err != rollback { - b.Error(err) - } -} - -func BenchmarkDBBatchAutomatic(b *testing.B) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("bench")) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - start := make(chan struct{}) - var wg sync.WaitGroup - - for round := 0; round < 1000; round++ { - wg.Add(1) - - go func(id uint32) { - defer wg.Done() - <-start - - h := fnv.New32a() - buf := make([]byte, 4) - binary.LittleEndian.PutUint32(buf, id) - h.Write(buf[:]) - k := h.Sum(nil) - insert := func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("bench")) - return b.Put(k, []byte("filler")) - } - if err := db.Batch(insert); err != nil { - b.Error(err) - return - } - }(uint32(round)) - } - close(start) - wg.Wait() - } - - b.StopTimer() - validateBatchBench(b, db) -} - -func BenchmarkDBBatchSingle(b *testing.B) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("bench")) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - start := make(chan struct{}) - var wg sync.WaitGroup - - for round := 0; round < 1000; round++ { - wg.Add(1) - go func(id uint32) { - defer wg.Done() - <-start - - h := fnv.New32a() - buf := make([]byte, 4) - binary.LittleEndian.PutUint32(buf, id) - h.Write(buf[:]) - k := h.Sum(nil) - insert := func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("bench")) - return b.Put(k, []byte("filler")) - } - if err := db.Update(insert); err != nil { - b.Error(err) - return - } - }(uint32(round)) - } - close(start) - wg.Wait() - } - - b.StopTimer() - validateBatchBench(b, db) -} - -func BenchmarkDBBatchManual10x100(b *testing.B) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("bench")) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - start := make(chan struct{}) - var wg sync.WaitGroup - - for major 
:= 0; major < 10; major++ { - wg.Add(1) - go func(id uint32) { - defer wg.Done() - <-start - - insert100 := func(tx *bolt.Tx) error { - h := fnv.New32a() - buf := make([]byte, 4) - for minor := uint32(0); minor < 100; minor++ { - binary.LittleEndian.PutUint32(buf, uint32(id*100+minor)) - h.Reset() - h.Write(buf[:]) - k := h.Sum(nil) - b := tx.Bucket([]byte("bench")) - if err := b.Put(k, []byte("filler")); err != nil { - return err - } - } - return nil - } - if err := db.Update(insert100); err != nil { - b.Fatal(err) - } - }(uint32(major)) - } - close(start) - wg.Wait() - } - - b.StopTimer() - validateBatchBench(b, db) -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/batch_example_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/batch_example_test.go deleted file mode 100644 index 74eff8af9..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/batch_example_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package bolt_test - -import ( - "encoding/binary" - "fmt" - "io/ioutil" - "log" - "math/rand" - "net/http" - "net/http/httptest" - "os" - - "github.com/boltdb/bolt" -) - -// Set this to see how the counts are actually updated. -const verbose = false - -// Counter updates a counter in Bolt for every URL path requested. -type counter struct { - db *bolt.DB -} - -func (c counter) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - // Communicates the new count from a successful database - // transaction. - var result uint64 - - increment := func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("hits")) - if err != nil { - return err - } - key := []byte(req.URL.String()) - // Decode handles key not found for us. - count := decode(b.Get(key)) + 1 - b.Put(key, encode(count)) - // All good, communicate new count. - result = count - return nil - } - if err := c.db.Batch(increment); err != nil { - http.Error(rw, err.Error(), 500) - return - } - - if verbose { - log.Printf("server: %s: %d", req.URL.String(), result) - } - - rw.Header().Set("Content-Type", "application/octet-stream") - fmt.Fprintf(rw, "%d\n", result) -} - -func client(id int, base string, paths []string) error { - // Process paths in random order. - rng := rand.New(rand.NewSource(int64(id))) - permutation := rng.Perm(len(paths)) - - for i := range paths { - path := paths[permutation[i]] - resp, err := http.Get(base + path) - if err != nil { - return err - } - defer resp.Body.Close() - buf, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - if verbose { - log.Printf("client: %s: %s", path, buf) - } - } - return nil -} - -func ExampleDB_Batch() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Start our web server - count := counter{db} - srv := httptest.NewServer(count) - defer srv.Close() - - // Decrease the batch size to make things more interesting. - db.MaxBatchSize = 3 - - // Get every path multiple times concurrently. - const clients = 10 - paths := []string{ - "/foo", - "/bar", - "/baz", - "/quux", - "/thud", - "/xyzzy", - } - errors := make(chan error, clients) - for i := 0; i < clients; i++ { - go func(id int) { - errors <- client(id, srv.URL, paths) - }(i) - } - // Check all responses to make sure there's no error. 
- for i := 0; i < clients; i++ { - if err := <-errors; err != nil { - fmt.Printf("client error: %v", err) - return - } - } - - // Check the final result - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("hits")) - c := b.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - fmt.Printf("hits to %s: %d\n", k, decode(v)) - } - return nil - }) - - // Output: - // hits to /bar: 10 - // hits to /baz: 10 - // hits to /foo: 10 - // hits to /quux: 10 - // hits to /thud: 10 - // hits to /xyzzy: 10 -} - -// encode marshals a counter. -func encode(n uint64) []byte { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, n) - return buf -} - -// decode unmarshals a counter. Nil buffers are decoded as 0. -func decode(buf []byte) uint64 { - if buf == nil { - return 0 - } - return binary.BigEndian.Uint64(buf) -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/batch_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/batch_test.go deleted file mode 100644 index 0b5075fdd..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/batch_test.go +++ /dev/null @@ -1,167 +0,0 @@ -package bolt_test - -import ( - "testing" - "time" - - "github.com/boltdb/bolt" -) - -// Ensure two functions can perform updates in a single batch. -func TestDB_Batch(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("widgets")) - - // Iterate over multiple updates in separate goroutines. - n := 2 - ch := make(chan error) - for i := 0; i < n; i++ { - go func(i int) { - ch <- db.Batch(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) - }) - }(i) - } - - // Check all responses to make sure there's no error. - for i := 0; i < n; i++ { - if err := <-ch; err != nil { - t.Fatal(err) - } - } - - // Ensure data is correct. - db.MustView(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 0; i < n; i++ { - if v := b.Get(u64tob(uint64(i))); v == nil { - t.Errorf("key not found: %d", i) - } - } - return nil - }) -} - -func TestDB_Batch_Panic(t *testing.T) { - db := NewTestDB() - defer db.Close() - - var sentinel int - var bork = &sentinel - var problem interface{} - var err error - - // Execute a function inside a batch that panics. - func() { - defer func() { - if p := recover(); p != nil { - problem = p - } - }() - err = db.Batch(func(tx *bolt.Tx) error { - panic(bork) - }) - }() - - // Verify there is no error. - if g, e := err, error(nil); g != e { - t.Fatalf("wrong error: %v != %v", g, e) - } - // Verify the panic was captured. - if g, e := problem, bork; g != e { - t.Fatalf("wrong error: %v != %v", g, e) - } -} - -func TestDB_BatchFull(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("widgets")) - - const size = 3 - // buffered so we never leak goroutines - ch := make(chan error, size) - put := func(i int) { - ch <- db.Batch(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) - }) - } - - db.MaxBatchSize = size - // high enough to never trigger here - db.MaxBatchDelay = 1 * time.Hour - - go put(1) - go put(2) - - // Give the batch a chance to exhibit bugs. - time.Sleep(10 * time.Millisecond) - - // not triggered yet - select { - case <-ch: - t.Fatalf("batch triggered too early") - default: - } - - go put(3) - - // Check all responses to make sure there's no error. - for i := 0; i < size; i++ { - if err := <-ch; err != nil { - t.Fatal(err) - } - } - - // Ensure data is correct. 
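-	// The third put raised the number of pending calls to MaxBatchSize,
-	// which is what forced the batch to run; all three keys must now exist.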
- db.MustView(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 1; i <= size; i++ { - if v := b.Get(u64tob(uint64(i))); v == nil { - t.Errorf("key not found: %d", i) - } - } - return nil - }) -} - -func TestDB_BatchTime(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("widgets")) - - const size = 1 - // buffered so we never leak goroutines - ch := make(chan error, size) - put := func(i int) { - ch <- db.Batch(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) - }) - } - - db.MaxBatchSize = 1000 - db.MaxBatchDelay = 0 - - go put(1) - - // Batch must trigger by time alone. - - // Check all responses to make sure there's no error. - for i := 0; i < size; i++ { - if err := <-ch; err != nil { - t.Fatal(err) - } - } - - // Ensure data is correct. - db.MustView(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 1; i <= size; i++ { - if v := b.Get(u64tob(uint64(i))); v == nil { - t.Errorf("key not found: %d", i) - } - } - return nil - }) -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_386.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_386.go deleted file mode 100644 index e659bfb91..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_386.go +++ /dev/null @@ -1,7 +0,0 @@ -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_amd64.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_amd64.go deleted file mode 100644 index cca6b7eb7..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_amd64.go +++ /dev/null @@ -1,7 +0,0 @@ -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm.go deleted file mode 100644 index e659bfb91..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm.go +++ /dev/null @@ -1,7 +0,0 @@ -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go deleted file mode 100644 index e9d1c907b..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go +++ /dev/null @@ -1,12 +0,0 @@ -package bolt - -import ( - "syscall" -) - -var odirect = syscall.O_DIRECT - -// fdatasync flushes written data to a file descriptor. 
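-// On Linux this maps to fdatasync(2), which flushes the file's data, and only
-// the metadata needed to read it back, making it cheaper than a full fsync.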
-func fdatasync(db *DB) error { - return syscall.Fdatasync(int(db.file.Fd())) -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go deleted file mode 100644 index 7c1bef1a4..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go +++ /dev/null @@ -1,29 +0,0 @@ -package bolt - -import ( - "syscall" - "unsafe" -) - -const ( - msAsync = 1 << iota // perform asynchronous writes - msSync // perform synchronous writes - msInvalidate // invalidate cached data -) - -var odirect int - -func msync(db *DB) error { - _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) - if errno != 0 { - return errno - } - return nil -} - -func fdatasync(db *DB) error { - if db.data != nil { - return msync(db) - } - return db.file.Sync() -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_test.go deleted file mode 100644 index b7bea1fc5..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package bolt_test - -import ( - "fmt" - "path/filepath" - "reflect" - "runtime" - "testing" -) - -// assert fails the test if the condition is false. -func assert(tb testing.TB, condition bool, msg string, v ...interface{}) { - if !condition { - _, file, line, _ := runtime.Caller(1) - fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", append([]interface{}{filepath.Base(file), line}, v...)...) - tb.FailNow() - } -} - -// ok fails the test if an err is not nil. -func ok(tb testing.TB, err error) { - if err != nil { - _, file, line, _ := runtime.Caller(1) - fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error()) - tb.FailNow() - } -} - -// equals fails the test if exp is not equal to act. -func equals(tb testing.TB, exp, act interface{}) { - if !reflect.DeepEqual(exp, act) { - _, file, line, _ := runtime.Caller(1) - fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act) - tb.FailNow() - } -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go deleted file mode 100644 index 17ca318bf..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go +++ /dev/null @@ -1,100 +0,0 @@ -// +build !windows,!plan9 - -package bolt - -import ( - "fmt" - "os" - "syscall" - "time" - "unsafe" -) - -// flock acquires an advisory lock on a file descriptor. -func flock(f *os.File, exclusive bool, timeout time.Duration) error { - var t time.Time - for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - flag := syscall.LOCK_SH - if exclusive { - flag = syscall.LOCK_EX - } - - // Otherwise attempt to obtain an exclusive lock. - err := syscall.Flock(int(f.Fd()), flag|syscall.LOCK_NB) - if err == nil { - return nil - } else if err != syscall.EWOULDBLOCK { - return err - } - - // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(f *os.File) error { - return syscall.Flock(int(f.Fd()), syscall.LOCK_UN) -} - -// mmap memory maps a DB's data file. 
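-// The mapping is created read-only (PROT_READ) and shared (MAP_SHARED), so
-// data written through the file descriptor during a commit becomes visible
-// through the map without remapping.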
-func mmap(db *DB, sz int) error { - // Truncate and fsync to ensure file size metadata is flushed. - // https://github.com/boltdb/bolt/issues/284 - if !db.NoGrowSync && !db.readOnly { - if err := db.file.Truncate(int64(sz)); err != nil { - return fmt.Errorf("file resize error: %s", err) - } - if err := db.file.Sync(); err != nil { - return fmt.Errorf("file sync error: %s", err) - } - } - - // Map the data file to memory. - b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED) - if err != nil { - return err - } - - // Advise the kernel that the mmap is accessed randomly. - if err := madvise(b, syscall.MADV_RANDOM); err != nil { - return fmt.Errorf("madvise: %s", err) - } - - // Save the original byte slice and convert to a byte array pointer. - db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) - db.datasz = sz - return nil -} - -// munmap unmaps a DB's data file from memory. -func munmap(db *DB) error { - // Ignore the unmap if we have no mapped data. - if db.dataref == nil { - return nil - } - - // Unmap using the original byte slice. - err := syscall.Munmap(db.dataref) - db.dataref = nil - db.data = nil - db.datasz = 0 - return err -} - -// NOTE: This function is copied from stdlib because it is not available on darwin. -func madvise(b []byte, advice int) (err error) { - _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = e1 - } - return -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go deleted file mode 100644 index 8b782be5f..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go +++ /dev/null @@ -1,76 +0,0 @@ -package bolt - -import ( - "fmt" - "os" - "syscall" - "time" - "unsafe" -) - -var odirect int - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return db.file.Sync() -} - -// flock acquires an advisory lock on a file descriptor. -func flock(f *os.File, _ bool, _ time.Duration) error { - return nil -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(f *os.File) error { - return nil -} - -// mmap memory maps a DB's data file. -// Based on: https://github.com/edsrzf/mmap-go -func mmap(db *DB, sz int) error { - if !db.readOnly { - // Truncate the database to the size of the mmap. - if err := db.file.Truncate(int64(sz)); err != nil { - return fmt.Errorf("truncate: %s", err) - } - } - - // Open a file mapping handle. - sizelo := uint32(sz >> 32) - sizehi := uint32(sz) & 0xffffffff - h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) - if h == 0 { - return os.NewSyscallError("CreateFileMapping", errno) - } - - // Create the memory map. - addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) - if addr == 0 { - return os.NewSyscallError("MapViewOfFile", errno) - } - - // Close mapping handle. - if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { - return os.NewSyscallError("CloseHandle", err) - } - - // Convert to a byte array. - db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) - db.datasz = sz - - return nil -} - -// munmap unmaps a pointer from a file. 
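-// Windows identifies the mapping by its base address, which is recovered
-// below from the first byte of db.data.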
-// Based on: https://github.com/edsrzf/mmap-go -func munmap(db *DB) error { - if db.data == nil { - return nil - } - - addr := (uintptr)(unsafe.Pointer(&db.data[0])) - if err := syscall.UnmapViewOfFile(addr); err != nil { - return os.NewSyscallError("UnmapViewOfFile", err) - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go b/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go deleted file mode 100644 index 8db89776f..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !windows,!plan9,!linux,!openbsd - -package bolt - -var odirect int - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return db.file.Sync() -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go deleted file mode 100644 index 676699210..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go +++ /dev/null @@ -1,743 +0,0 @@ -package bolt - -import ( - "bytes" - "fmt" - "unsafe" -) - -const ( - // MaxKeySize is the maximum length of a key, in bytes. - MaxKeySize = 32768 - - // MaxValueSize is the maximum length of a value, in bytes. - MaxValueSize = 4294967295 -) - -const ( - maxUint = ^uint(0) - minUint = 0 - maxInt = int(^uint(0) >> 1) - minInt = -maxInt - 1 -) - -const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) - -const ( - minFillPercent = 0.1 - maxFillPercent = 1.0 -) - -// DefaultFillPercent is the percentage that split pages are filled. -// This value can be changed by setting Bucket.FillPercent. -const DefaultFillPercent = 0.5 - -// Bucket represents a collection of key/value pairs inside the database. -type Bucket struct { - *bucket - tx *Tx // the associated transaction - buckets map[string]*Bucket // subbucket cache - page *page // inline page reference - rootNode *node // materialized node for the root page. - nodes map[pgid]*node // node cache - - // Sets the threshold for filling nodes when they split. By default, - // the bucket will fill to 50% but it can be useful to increase this - // amount if you know that your write workloads are mostly append-only. - // - // This is non-persisted across transactions so it must be set in every Tx. - FillPercent float64 -} - -// bucket represents the on-file representation of a bucket. -// This is stored as the "value" of a bucket key. If the bucket is small enough, -// then its root page can be stored inline in the "value", after the bucket -// header. In the case of inline buckets, the "root" will be 0. -type bucket struct { - root pgid // page id of the bucket's root-level page - sequence uint64 // monotonically incrementing, used by NextSequence() -} - -// newBucket returns a new bucket associated with a transaction. -func newBucket(tx *Tx) Bucket { - var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} - if tx.writable { - b.buckets = make(map[string]*Bucket) - b.nodes = make(map[pgid]*node) - } - return b -} - -// Tx returns the tx of the bucket. -func (b *Bucket) Tx() *Tx { - return b.tx -} - -// Root returns the root of the bucket. -func (b *Bucket) Root() pgid { - return b.root -} - -// Writable returns whether the bucket is writable. -func (b *Bucket) Writable() bool { - return b.tx.writable -} - -// Cursor creates a cursor associated with the bucket. -// The cursor is only valid as long as the transaction is open. -// Do not use a cursor after the transaction is closed. 
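-//
-// A full scan, for example, looks roughly like:
-//
-//	c := bucket.Cursor()
-//	for k, v := c.First(); k != nil; k, v = c.Next() {
-//		// k and v are only valid for the life of the transaction.
-//	}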
-func (b *Bucket) Cursor() *Cursor {
-	// Update transaction statistics.
-	b.tx.stats.CursorCount++
-
-	// Allocate and return a cursor.
-	return &Cursor{
-		bucket: b,
-		stack:  make([]elemRef, 0),
-	}
-}
-
-// Bucket retrieves a nested bucket by name.
-// Returns nil if the bucket does not exist.
-func (b *Bucket) Bucket(name []byte) *Bucket {
-	if b.buckets != nil {
-		if child := b.buckets[string(name)]; child != nil {
-			return child
-		}
-	}
-
-	// Move cursor to key.
-	c := b.Cursor()
-	k, v, flags := c.seek(name)
-
-	// Return nil if the key doesn't exist or it is not a bucket.
-	if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
-		return nil
-	}
-
-	// Otherwise create a bucket and cache it.
-	var child = b.openBucket(v)
-	if b.buckets != nil {
-		b.buckets[string(name)] = child
-	}
-
-	return child
-}
-
-// openBucket is a helper that re-interprets a sub-bucket value
-// from a parent into a Bucket.
-func (b *Bucket) openBucket(value []byte) *Bucket {
-	var child = newBucket(b.tx)
-
-	// If this is a writable transaction then we need to copy the bucket entry.
-	// Read-only transactions can point directly at the mmap entry.
-	if b.tx.writable {
-		child.bucket = &bucket{}
-		*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
-	} else {
-		child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
-	}
-
-	// Save a reference to the inline page if the bucket is inline.
-	if child.root == 0 {
-		child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
-	}
-
-	return &child
-}
-
-// CreateBucket creates a new bucket at the given key and returns the new bucket.
-// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
-func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
-	if b.tx.db == nil {
-		return nil, ErrTxClosed
-	} else if !b.tx.writable {
-		return nil, ErrTxNotWritable
-	} else if len(key) == 0 {
-		return nil, ErrBucketNameRequired
-	}
-
-	// Move cursor to correct position.
-	c := b.Cursor()
-	k, _, flags := c.seek(key)
-
-	// Return an error if there is an existing key.
-	if bytes.Equal(key, k) {
-		if (flags & bucketLeafFlag) != 0 {
-			return nil, ErrBucketExists
-		} else {
-			return nil, ErrIncompatibleValue
-		}
-	}
-
-	// Create empty, inline bucket.
-	var bucket = Bucket{
-		bucket:      &bucket{},
-		rootNode:    &node{isLeaf: true},
-		FillPercent: DefaultFillPercent,
-	}
-	var value = bucket.write()
-
-	// Insert into node.
-	key = cloneBytes(key)
-	c.node().put(key, key, value, 0, bucketLeafFlag)
-
-	// Since subbuckets are not allowed on inline buckets, we need to
-	// dereference the inline page, if it exists. This will cause the bucket
-	// to be treated as a regular, non-inline bucket for the rest of the tx.
-	b.page = nil
-
-	return b.Bucket(key), nil
-}
-
-// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
-// Returns an error if the bucket name is blank, or if the bucket name is too long.
-func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
-	child, err := b.CreateBucket(key)
-	if err == ErrBucketExists {
-		return b.Bucket(key), nil
-	} else if err != nil {
-		return nil, err
-	}
-	return child, nil
-}
-
-// DeleteBucket deletes a bucket at the given key.
-// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
-func (b *Bucket) DeleteBucket(key []byte) error {
-	if b.tx.db == nil {
-		return ErrTxClosed
-	} else if !b.Writable() {
-		return ErrTxNotWritable
-	}
-
-	// Move cursor to correct position.
-	c := b.Cursor()
-	k, _, flags := c.seek(key)
-
-	// Return an error if the bucket doesn't exist or the key is not a bucket.
-	if !bytes.Equal(key, k) {
-		return ErrBucketNotFound
-	} else if (flags & bucketLeafFlag) == 0 {
-		return ErrIncompatibleValue
-	}
-
-	// Recursively delete all child buckets.
-	child := b.Bucket(key)
-	err := child.ForEach(func(k, v []byte) error {
-		if v == nil {
-			if err := child.DeleteBucket(k); err != nil {
-				return fmt.Errorf("delete bucket: %s", err)
-			}
-		}
-		return nil
-	})
-	if err != nil {
-		return err
-	}
-
-	// Remove cached copy.
-	delete(b.buckets, string(key))
-
-	// Release all bucket pages to freelist.
-	child.nodes = nil
-	child.rootNode = nil
-	child.free()
-
-	// Delete the node if we have a matching key.
-	c.node().del(key)
-
-	return nil
-}
-
-// Get retrieves the value for a key in the bucket.
-// Returns a nil value if the key does not exist or if the key is a nested bucket.
-// The returned value is only valid for the life of the transaction.
-func (b *Bucket) Get(key []byte) []byte {
-	k, v, flags := b.Cursor().seek(key)
-
-	// Return nil if this is a bucket.
-	if (flags & bucketLeafFlag) != 0 {
-		return nil
-	}
-
-	// If our target node isn't the same key as what's passed in then return nil.
-	if !bytes.Equal(key, k) {
-		return nil
-	}
-	return v
-}
-
-// Put sets the value for a key in the bucket.
-// If the key exists then its previous value will be overwritten.
-// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
-func (b *Bucket) Put(key []byte, value []byte) error {
-	if b.tx.db == nil {
-		return ErrTxClosed
-	} else if !b.Writable() {
-		return ErrTxNotWritable
-	} else if len(key) == 0 {
-		return ErrKeyRequired
-	} else if len(key) > MaxKeySize {
-		return ErrKeyTooLarge
-	} else if int64(len(value)) > MaxValueSize {
-		return ErrValueTooLarge
-	}
-
-	// Move cursor to correct position.
-	c := b.Cursor()
-	k, _, flags := c.seek(key)
-
-	// Return an error if there is an existing key with a bucket value.
-	if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
-		return ErrIncompatibleValue
-	}
-
-	// Insert into node.
-	key = cloneBytes(key)
-	c.node().put(key, key, value, 0, 0)
-
-	return nil
-}
-
-// Delete removes a key from the bucket.
-// If the key does not exist then nothing is done and a nil error is returned.
-// Returns an error if the bucket was created from a read-only transaction.
-func (b *Bucket) Delete(key []byte) error {
-	if b.tx.db == nil {
-		return ErrTxClosed
-	} else if !b.Writable() {
-		return ErrTxNotWritable
-	}
-
-	// Move cursor to correct position.
-	c := b.Cursor()
-	_, _, flags := c.seek(key)
-
-	// Return an error if there is an existing bucket value.
-	if (flags & bucketLeafFlag) != 0 {
-		return ErrIncompatibleValue
-	}
-
-	// Delete the node if we have a matching key.
-	c.node().del(key)
-
-	return nil
-}
-
-// NextSequence returns an autoincrementing integer for the bucket.
-func (b *Bucket) NextSequence() (uint64, error) {
-	if b.tx.db == nil {
-		return 0, ErrTxClosed
-	} else if !b.Writable() {
-		return 0, ErrTxNotWritable
-	}
-
-	// Materialize the root node if it hasn't been already so that the
-	// bucket will be saved during commit.
-	if b.rootNode == nil {
-		_ = b.node(b.root, nil)
-	}
-
-	// Increment and return the sequence.
-	b.bucket.sequence++
-	return b.bucket.sequence, nil
-}
-
-// ForEach executes a function for each key/value pair in a bucket.
-// If the provided function returns an error then the iteration is stopped and
-// the error is returned to the caller.
-func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
-	if b.tx.db == nil {
-		return ErrTxClosed
-	}
-	c := b.Cursor()
-	for k, v := c.First(); k != nil; k, v = c.Next() {
-		if err := fn(k, v); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// Stats returns stats on a bucket.
-func (b *Bucket) Stats() BucketStats {
-	var s, subStats BucketStats
-	pageSize := b.tx.db.pageSize
-	s.BucketN += 1
-	if b.root == 0 {
-		s.InlineBucketN += 1
-	}
-	b.forEachPage(func(p *page, depth int) {
-		if (p.flags & leafPageFlag) != 0 {
-			s.KeyN += int(p.count)
-
-			// used totals the used bytes for the page
-			used := pageHeaderSize
-
-			if p.count != 0 {
-				// If page has any elements, add all element headers.
-				used += leafPageElementSize * int(p.count-1)
-
-				// Add all element key, value sizes.
-				// The computation takes advantage of the fact that the position
-				// of the last element's key/value equals the total of the sizes
-				// of all previous elements' keys and values.
-				// It also includes the last element's header.
-				lastElement := p.leafPageElement(p.count - 1)
-				used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
-			}
-
-			if b.root == 0 {
-				// For inlined bucket just update the inline stats
-				s.InlineBucketInuse += used
-			} else {
-				// For non-inlined bucket update all the leaf stats
-				s.LeafPageN++
-				s.LeafInuse += used
-				s.LeafOverflowN += int(p.overflow)
-
-				// Collect stats from sub-buckets.
-				// Do that by iterating over all element headers
-				// looking for the ones with the bucketLeafFlag.
-				for i := uint16(0); i < p.count; i++ {
-					e := p.leafPageElement(i)
-					if (e.flags & bucketLeafFlag) != 0 {
-						// For any bucket element, open the element value
-						// and recursively call Stats on the contained bucket.
-						subStats.Add(b.openBucket(e.value()).Stats())
-					}
-				}
-			}
-		} else if (p.flags & branchPageFlag) != 0 {
-			s.BranchPageN++
-			lastElement := p.branchPageElement(p.count - 1)
-
-			// used totals the used bytes for the page
-			// Add header and all element headers.
-			used := pageHeaderSize + (branchPageElementSize * int(p.count-1))
-
-			// Add size of all keys and values.
-			// Again, use the fact that the last element's position equals the
-			// total of the key and value sizes of all previous elements.
-			used += int(lastElement.pos + lastElement.ksize)
-			s.BranchInuse += used
-			s.BranchOverflowN += int(p.overflow)
-		}
-
-		// Keep track of maximum page depth.
-		if depth+1 > s.Depth {
-			s.Depth = (depth + 1)
-		}
-	})
-
-	// Alloc stats can be computed from page counts and pageSize.
-	s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
-	s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize
-
-	// Add the max depth of sub-buckets to get total nested depth.
-	s.Depth += subStats.Depth
-	// Add the stats for all sub-buckets
-	s.Add(subStats)
-	return s
-}
-
-// forEachPage iterates over every page in a bucket, including inline pages.
-func (b *Bucket) forEachPage(fn func(*page, int)) {
-	// If we have an inline page then just use that.
-	if b.page != nil {
-		fn(b.page, 0)
-		return
-	}
-
-	// Otherwise traverse the page hierarchy.
-	b.tx.forEachPage(b.root, 0, fn)
-}
-
-// forEachPageNode iterates over every page (or node) in a bucket.
-// This also includes inline pages.
-func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
-	// If we have an inline page or root node then just use that.
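-	// Inline buckets embed their single page in the bucket's value, so that
-	// page is visited directly at depth zero rather than via the page tree.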
- if b.page != nil { - fn(b.page, nil, 0) - return - } - b._forEachPageNode(b.root, 0, fn) -} - -func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { - var p, n = b.pageNode(pgid) - - // Execute function. - fn(p, n, depth) - - // Recursively loop over children. - if p != nil { - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - b._forEachPageNode(elem.pgid, depth+1, fn) - } - } - } else { - if !n.isLeaf { - for _, inode := range n.inodes { - b._forEachPageNode(inode.pgid, depth+1, fn) - } - } - } -} - -// spill writes all the nodes for this bucket to dirty pages. -func (b *Bucket) spill() error { - // Spill all child buckets first. - for name, child := range b.buckets { - // If the child bucket is small enough and it has no child buckets then - // write it inline into the parent bucket's page. Otherwise spill it - // like a normal bucket and make the parent value a pointer to the page. - var value []byte - if child.inlineable() { - child.free() - value = child.write() - } else { - if err := child.spill(); err != nil { - return err - } - - // Update the child bucket header in this bucket. - value = make([]byte, unsafe.Sizeof(bucket{})) - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *child.bucket - } - - // Skip writing the bucket if there are no materialized nodes. - if child.rootNode == nil { - continue - } - - // Update parent node. - var c = b.Cursor() - k, _, flags := c.seek([]byte(name)) - if !bytes.Equal([]byte(name), k) { - panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) - } - if flags&bucketLeafFlag == 0 { - panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) - } - c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) - } - - // Ignore if there's not a materialized root node. - if b.rootNode == nil { - return nil - } - - // Spill nodes. - if err := b.rootNode.spill(); err != nil { - return err - } - b.rootNode = b.rootNode.root() - - // Update the root node for this bucket. - if b.rootNode.pgid >= b.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) - } - b.root = b.rootNode.pgid - - return nil -} - -// inlineable returns true if a bucket is small enough to be written inline -// and if it contains no subbuckets. Otherwise returns false. -func (b *Bucket) inlineable() bool { - var n = b.rootNode - - // Bucket must only contain a single leaf node. - if n == nil || !n.isLeaf { - return false - } - - // Bucket is not inlineable if it contains subbuckets or if it goes beyond - // our threshold for inline bucket size. - var size = pageHeaderSize - for _, inode := range n.inodes { - size += leafPageElementSize + len(inode.key) + len(inode.value) - - if inode.flags&bucketLeafFlag != 0 { - return false - } else if size > b.maxInlineBucketSize() { - return false - } - } - - return true -} - -// Returns the maximum total size of a bucket to make it a candidate for inlining. -func (b *Bucket) maxInlineBucketSize() int { - return b.tx.db.pageSize / 4 -} - -// write allocates and writes a bucket to a byte slice. -func (b *Bucket) write() []byte { - // Allocate the appropriate size. - var n = b.rootNode - var value = make([]byte, bucketHeaderSize+n.size()) - - // Write a bucket header. - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *b.bucket - - // Convert byte slice to a fake page and write the root node. 
-	var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
-	n.write(p)
-
-	return value
-}
-
-// rebalance attempts to balance all nodes.
-func (b *Bucket) rebalance() {
-	for _, n := range b.nodes {
-		n.rebalance()
-	}
-	for _, child := range b.buckets {
-		child.rebalance()
-	}
-}
-
-// node creates a node from a page and associates it with a given parent.
-func (b *Bucket) node(pgid pgid, parent *node) *node {
-	_assert(b.nodes != nil, "nodes map expected")
-
-	// Retrieve node if it's already been created.
-	if n := b.nodes[pgid]; n != nil {
-		return n
-	}
-
-	// Otherwise create a node and cache it.
-	n := &node{bucket: b, parent: parent}
-	if parent == nil {
-		b.rootNode = n
-	} else {
-		parent.children = append(parent.children, n)
-	}
-
-	// Use the inline page if this is an inline bucket.
-	var p = b.page
-	if p == nil {
-		p = b.tx.page(pgid)
-	}
-
-	// Read the page into the node and cache it.
-	n.read(p)
-	b.nodes[pgid] = n
-
-	// Update statistics.
-	b.tx.stats.NodeCount++
-
-	return n
-}
-
-// free recursively frees all pages in the bucket.
-func (b *Bucket) free() {
-	if b.root == 0 {
-		return
-	}
-
-	var tx = b.tx
-	b.forEachPageNode(func(p *page, n *node, _ int) {
-		if p != nil {
-			tx.db.freelist.free(tx.meta.txid, p)
-		} else {
-			n.free()
-		}
-	})
-	b.root = 0
-}
-
-// dereference removes all references to the old mmap.
-func (b *Bucket) dereference() {
-	if b.rootNode != nil {
-		b.rootNode.root().dereference()
-	}
-
-	for _, child := range b.buckets {
-		child.dereference()
-	}
-}
-
-// pageNode returns the in-memory node, if it exists.
-// Otherwise returns the underlying page.
-func (b *Bucket) pageNode(id pgid) (*page, *node) {
-	// Inline buckets have a fake page embedded in their value so treat them
-	// differently. We'll return the rootNode (if available) or the fake page.
-	if b.root == 0 {
-		if id != 0 {
-			panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
-		}
-		if b.rootNode != nil {
-			return nil, b.rootNode
-		}
-		return b.page, nil
-	}
-
-	// Check the node cache for non-inline buckets.
-	if b.nodes != nil {
-		if n := b.nodes[id]; n != nil {
-			return nil, n
-		}
-	}
-
-	// Finally look up the page from the transaction if no node is materialized.
-	return b.tx.page(id), nil
-}
-
-// BucketStats records statistics about resources used by a bucket.
-type BucketStats struct {
-	// Page count statistics.
-	BranchPageN int // number of logical branch pages
-	BranchOverflowN int // number of physical branch overflow pages
-	LeafPageN int // number of logical leaf pages
-	LeafOverflowN int // number of physical leaf overflow pages
-
-	// Tree statistics.
-	KeyN int // number of key/value pairs
-	Depth int // number of levels in B+tree
-
-	// Page size utilization.
-	BranchAlloc int // bytes allocated for physical branch pages
-	BranchInuse int // bytes actually used for branch data
-	LeafAlloc int // bytes allocated for physical leaf pages
-	LeafInuse int // bytes actually used for leaf data
-
-	// Bucket statistics
-	BucketN int // total number of buckets including the top bucket
-	InlineBucketN int // total number of inlined buckets
-	InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
-}
-
-func (s *BucketStats) Add(other BucketStats) {
-	s.BranchPageN += other.BranchPageN
-	s.BranchOverflowN += other.BranchOverflowN
-	s.LeafPageN += other.LeafPageN
-	s.LeafOverflowN += other.LeafOverflowN
-	s.KeyN += other.KeyN
-	if s.Depth < other.Depth {
-		s.Depth = other.Depth
-	}
-	s.BranchAlloc += other.BranchAlloc
-	s.BranchInuse += other.BranchInuse
-	s.LeafAlloc += other.LeafAlloc
-	s.LeafInuse += other.LeafInuse
-
-	s.BucketN += other.BucketN
-	s.InlineBucketN += other.InlineBucketN
-	s.InlineBucketInuse += other.InlineBucketInuse
-}
-
-// cloneBytes returns a copy of a given slice.
-func cloneBytes(v []byte) []byte {
-	var clone = make([]byte, len(v))
-	copy(clone, v)
-	return clone
-}
diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bucket_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bucket_test.go
deleted file mode 100644
index 62b8c5878..000000000
--- a/Godeps/_workspace/src/github.com/boltdb/bolt/bucket_test.go
+++ /dev/null
@@ -1,1169 +0,0 @@
-package bolt_test
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"math/rand"
-	"os"
-	"strconv"
-	"strings"
-	"testing"
-	"testing/quick"
-
-	"github.com/boltdb/bolt"
-)
-
-// Ensure that a bucket that gets a non-existent key returns nil.
-func TestBucket_Get_NonExistent(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
-		assert(t, value == nil, "")
-		return nil
-	})
-}
-
-// Ensure that a bucket can read a value that is not flushed yet.
-func TestBucket_Get_FromNode(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		b := tx.Bucket([]byte("widgets"))
-		b.Put([]byte("foo"), []byte("bar"))
-		value := b.Get([]byte("foo"))
-		equals(t, []byte("bar"), value)
-		return nil
-	})
-}
-
-// Ensure that a nested bucket retrieved via Get() returns nil.
-func TestBucket_Get_IncompatibleValue(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		_, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
-		ok(t, err)
-		assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "")
-		return nil
-	})
-}
-
-// Ensure that a bucket can write a key/value.
-func TestBucket_Put(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
-		ok(t, err)
-		value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
-		equals(t, value, []byte("bar"))
-		return nil
-	})
-}
-
-// Ensure that a bucket can rewrite a key in the same transaction.
-func TestBucket_Put_Repeat(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		b := tx.Bucket([]byte("widgets"))
-		ok(t, b.Put([]byte("foo"), []byte("bar")))
-		ok(t, b.Put([]byte("foo"), []byte("baz")))
-		value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
-		equals(t, value, []byte("baz"))
-		return nil
-	})
-}
-
-// Ensure that a bucket can write a bunch of large values.
-func TestBucket_Put_Large(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-
-	count, factor := 100, 200
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		b := tx.Bucket([]byte("widgets"))
-		for i := 1; i < count; i++ {
-			ok(t, b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor))))
-		}
-		return nil
-	})
-	db.View(func(tx *bolt.Tx) error {
-		b := tx.Bucket([]byte("widgets"))
-		for i := 1; i < count; i++ {
-			value := b.Get([]byte(strings.Repeat("0", i*factor)))
-			equals(t, []byte(strings.Repeat("X", (count-i)*factor)), value)
-		}
-		return nil
-	})
-}
-
-// Ensure that a database can perform multiple large appends safely.
-func TestDB_Put_VeryLarge(t *testing.T) {
-	if testing.Short() {
-		t.Skip("skipping test in short mode.")
-	}
-
-	n, batchN := 400000, 200000
-	ksize, vsize := 8, 500
-
-	db := NewTestDB()
-	defer db.Close()
-
-	for i := 0; i < n; i += batchN {
-		err := db.Update(func(tx *bolt.Tx) error {
-			b, _ := tx.CreateBucketIfNotExists([]byte("widgets"))
-			for j := 0; j < batchN; j++ {
-				k, v := make([]byte, ksize), make([]byte, vsize)
-				binary.BigEndian.PutUint32(k, uint32(i+j))
-				ok(t, b.Put(k, v))
-			}
-			return nil
-		})
-		ok(t, err)
-	}
-}
-
-// Ensure that setting a value on a key with a bucket value returns an error.
-func TestBucket_Put_IncompatibleValue(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		_, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
-		ok(t, err)
-		equals(t, bolt.ErrIncompatibleValue, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
-		return nil
-	})
-}
-
-// Ensure that setting a value while the transaction is closed returns an error.
-func TestBucket_Put_Closed(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	tx, _ := db.Begin(true)
-	tx.CreateBucket([]byte("widgets"))
-	b := tx.Bucket([]byte("widgets"))
-	tx.Rollback()
-	equals(t, bolt.ErrTxClosed, b.Put([]byte("foo"), []byte("bar")))
-}
-
-// Ensure that setting a value on a read-only bucket returns an error.
-func TestBucket_Put_ReadOnly(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		_, err := tx.CreateBucket([]byte("widgets"))
-		ok(t, err)
-		return nil
-	})
-	db.View(func(tx *bolt.Tx) error {
-		b := tx.Bucket([]byte("widgets"))
-		err := b.Put([]byte("foo"), []byte("bar"))
-		equals(t, err, bolt.ErrTxNotWritable)
-		return nil
-	})
-}
-
-// Ensure that a bucket can delete an existing key.
-func TestBucket_Delete(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
-		err := tx.Bucket([]byte("widgets")).Delete([]byte("foo"))
-		ok(t, err)
-		value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
-		assert(t, value == nil, "")
-		return nil
-	})
-}
-
-// Ensure that deleting a large set of keys will work correctly.
-func TestBucket_Delete_Large(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - var b, _ = tx.CreateBucket([]byte("widgets")) - for i := 0; i < 100; i++ { - ok(t, b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024)))) - } - return nil - }) - db.Update(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - for i := 0; i < 100; i++ { - ok(t, b.Delete([]byte(strconv.Itoa(i)))) - } - return nil - }) - db.View(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - for i := 0; i < 100; i++ { - assert(t, b.Get([]byte(strconv.Itoa(i))) == nil, "") - } - return nil - }) -} - -// Deleting a very large list of keys will cause the freelist to use overflow. -func TestBucket_Delete_FreelistOverflow(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - db := NewTestDB() - defer db.Close() - k := make([]byte, 16) - for i := uint64(0); i < 10000; i++ { - err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("0")) - if err != nil { - t.Fatalf("bucket error: %s", err) - } - - for j := uint64(0); j < 1000; j++ { - binary.BigEndian.PutUint64(k[:8], i) - binary.BigEndian.PutUint64(k[8:], j) - if err := b.Put(k, nil); err != nil { - t.Fatalf("put error: %s", err) - } - } - - return nil - }) - - if err != nil { - t.Fatalf("update error: %s", err) - } - } - - // Delete all of them in one large transaction - err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("0")) - c := b.Cursor() - for k, _ := c.First(); k != nil; k, _ = c.Next() { - b.Delete(k) - } - return nil - }) - - // Check that a freelist overflow occurred. - ok(t, err) -} - -// Ensure that accessing and updating nested buckets is ok across transactions. -func TestBucket_Nested(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - // Create a widgets bucket. - b, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - - // Create a widgets/foo bucket. - _, err = b.CreateBucket([]byte("foo")) - ok(t, err) - - // Create a widgets/bar key. - ok(t, b.Put([]byte("bar"), []byte("0000"))) - - return nil - }) - db.MustCheck() - - // Update widgets/bar. - db.Update(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - ok(t, b.Put([]byte("bar"), []byte("xxxx"))) - return nil - }) - db.MustCheck() - - // Cause a split. - db.Update(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - for i := 0; i < 10000; i++ { - ok(t, b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))) - } - return nil - }) - db.MustCheck() - - // Insert into widgets/foo/baz. - db.Update(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - ok(t, b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy"))) - return nil - }) - db.MustCheck() - - // Verify. - db.View(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - equals(t, []byte("yyyy"), b.Bucket([]byte("foo")).Get([]byte("baz"))) - equals(t, []byte("xxxx"), b.Get([]byte("bar"))) - for i := 0; i < 10000; i++ { - equals(t, []byte(strconv.Itoa(i)), b.Get([]byte(strconv.Itoa(i)))) - } - return nil - }) -} - -// Ensure that deleting a bucket using Delete() returns an error. 
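-// Delete only removes plain keys; nested buckets have to be removed with
-// DeleteBucket, so this must fail with ErrIncompatibleValue.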
-func TestBucket_Delete_Bucket(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		b := tx.Bucket([]byte("widgets"))
-		_, err := b.CreateBucket([]byte("foo"))
-		ok(t, err)
-		equals(t, bolt.ErrIncompatibleValue, b.Delete([]byte("foo")))
-		return nil
-	})
-}
-
-// Ensure that deleting a key on a read-only bucket returns an error.
-func TestBucket_Delete_ReadOnly(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		return nil
-	})
-	db.View(func(tx *bolt.Tx) error {
-		b := tx.Bucket([]byte("widgets"))
-		err := b.Delete([]byte("foo"))
-		equals(t, err, bolt.ErrTxNotWritable)
-		return nil
-	})
-}
-
-// Ensure that deleting a value while the transaction is closed returns an error.
-func TestBucket_Delete_Closed(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	tx, _ := db.Begin(true)
-	tx.CreateBucket([]byte("widgets"))
-	b := tx.Bucket([]byte("widgets"))
-	tx.Rollback()
-	equals(t, bolt.ErrTxClosed, b.Delete([]byte("foo")))
-}
-
-// Ensure that deleting a bucket causes nested buckets to be deleted.
-func TestBucket_DeleteBucket_Nested(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		_, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
-		ok(t, err)
-		_, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar"))
-		ok(t, err)
-		ok(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat")))
-		ok(t, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")))
-		return nil
-	})
-}
-
-// Ensure that deleting a bucket causes nested buckets to be deleted after they have been committed.
-func TestBucket_DeleteBucket_Nested2(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		_, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
-		ok(t, err)
-		_, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar"))
-		ok(t, err)
-		ok(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat")))
-		return nil
-	})
-	db.Update(func(tx *bolt.Tx) error {
-		assert(t, tx.Bucket([]byte("widgets")) != nil, "")
-		assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) != nil, "")
-		assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")) != nil, "")
-		equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Get([]byte("baz")))
-		ok(t, tx.DeleteBucket([]byte("widgets")))
-		return nil
-	})
-	db.View(func(tx *bolt.Tx) error {
-		assert(t, tx.Bucket([]byte("widgets")) == nil, "")
-		return nil
-	})
-}
-
-// Ensure that deleting a child bucket with multiple pages causes all pages to get collected.
-func TestBucket_DeleteBucket_Large(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		_, err := tx.CreateBucket([]byte("widgets"))
-		ok(t, err)
-		_, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
-		ok(t, err)
-		b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo"))
-		for i := 0; i < 1000; i++ {
-			ok(t, b.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i))))
-		}
-		return nil
-	})
-	db.Update(func(tx *bolt.Tx) error {
-		ok(t, tx.DeleteBucket([]byte("widgets")))
-		return nil
-	})
-
-	// NOTE: Consistency check in TestDB.Close() will panic if pages not freed properly.
-}
-
-// Ensure that a simple value retrieved via Bucket() returns nil.
-func TestBucket_Bucket_IncompatibleValue(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
-		assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) == nil, "")
-		return nil
-	})
-}
-
-// Ensure that creating a bucket on an existing non-bucket key returns an error.
-func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		_, err := tx.CreateBucket([]byte("widgets"))
-		ok(t, err)
-		ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
-		_, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
-		equals(t, bolt.ErrIncompatibleValue, err)
-		return nil
-	})
-}
-
-// Ensure that deleting a bucket on an existing non-bucket key returns an error.
-func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		_, err := tx.CreateBucket([]byte("widgets"))
-		ok(t, err)
-		ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
-		equals(t, bolt.ErrIncompatibleValue, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")))
-		return nil
-	})
-}
-
-// Ensure that a bucket can return an autoincrementing sequence.
-func TestBucket_NextSequence(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		tx.CreateBucket([]byte("woojits"))
-
-		// Make sure sequence increments.
-		seq, err := tx.Bucket([]byte("widgets")).NextSequence()
-		ok(t, err)
-		equals(t, seq, uint64(1))
-		seq, err = tx.Bucket([]byte("widgets")).NextSequence()
-		ok(t, err)
-		equals(t, seq, uint64(2))
-
-		// Buckets should be separate.
-		seq, err = tx.Bucket([]byte("woojits")).NextSequence()
-		ok(t, err)
-		equals(t, seq, uint64(1))
-		return nil
-	})
-}
-
-// Ensure that a bucket will persist an autoincrementing sequence even if it's
-// the only thing updated on the bucket.
-// https://github.com/boltdb/bolt/issues/296
-func TestBucket_NextSequence_Persist(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		_, _ = tx.CreateBucket([]byte("widgets"))
-		return nil
-	})
-
-	db.Update(func(tx *bolt.Tx) error {
-		_, _ = tx.Bucket([]byte("widgets")).NextSequence()
-		return nil
-	})
-
-	db.Update(func(tx *bolt.Tx) error {
-		seq, err := tx.Bucket([]byte("widgets")).NextSequence()
-		if err != nil {
-			t.Fatalf("unexpected error: %s", err)
-		} else if seq != 2 {
-			t.Fatalf("unexpected sequence: %d", seq)
-		}
-		return nil
-	})
-}
-
-// Ensure that retrieving the next sequence on a read-only bucket returns an error.
-func TestBucket_NextSequence_ReadOnly(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		return nil
-	})
-	db.View(func(tx *bolt.Tx) error {
-		b := tx.Bucket([]byte("widgets"))
-		i, err := b.NextSequence()
-		equals(t, i, uint64(0))
-		equals(t, err, bolt.ErrTxNotWritable)
-		return nil
-	})
-}
-
-// Ensure that retrieving the next sequence for a bucket on a closed database returns an error.
-func TestBucket_NextSequence_Closed(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	tx, _ := db.Begin(true)
-	tx.CreateBucket([]byte("widgets"))
-	b := tx.Bucket([]byte("widgets"))
-	tx.Rollback()
-	_, err := b.NextSequence()
-	equals(t, bolt.ErrTxClosed, err)
-}
-
-// Ensure a user can loop over all key/value pairs in a bucket.
-func TestBucket_ForEach(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000"))
-		tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("0001"))
-		tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0002"))
-
-		var index int
-		err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
-			switch index {
-			case 0:
-				equals(t, k, []byte("bar"))
-				equals(t, v, []byte("0002"))
-			case 1:
-				equals(t, k, []byte("baz"))
-				equals(t, v, []byte("0001"))
-			case 2:
-				equals(t, k, []byte("foo"))
-				equals(t, v, []byte("0000"))
-			}
-			index++
-			return nil
-		})
-		ok(t, err)
-		equals(t, index, 3)
-		return nil
-	})
-}
-
-// Ensure a database can stop iteration early.
-func TestBucket_ForEach_ShortCircuit(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0000"))
-		tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("0000"))
-		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000"))
-
-		var index int
-		err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
-			index++
-			if bytes.Equal(k, []byte("baz")) {
-				return errors.New("marker")
-			}
-			return nil
-		})
-		equals(t, errors.New("marker"), err)
-		equals(t, 2, index)
-		return nil
-	})
-}
-
-// Ensure that looping over a bucket on a closed database returns an error.
-func TestBucket_ForEach_Closed(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	tx, _ := db.Begin(true)
-	tx.CreateBucket([]byte("widgets"))
-	b := tx.Bucket([]byte("widgets"))
-	tx.Rollback()
-	err := b.ForEach(func(k, v []byte) error { return nil })
-	equals(t, bolt.ErrTxClosed, err)
-}
-
-// Ensure that an error is returned when inserting with an empty key.
-func TestBucket_Put_EmptyKey(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		err := tx.Bucket([]byte("widgets")).Put([]byte(""), []byte("bar"))
-		equals(t, err, bolt.ErrKeyRequired)
-		err = tx.Bucket([]byte("widgets")).Put(nil, []byte("bar"))
-		equals(t, err, bolt.ErrKeyRequired)
-		return nil
-	})
-}
-
-// Ensure that an error is returned when inserting with a key that's too large.
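-// MaxKeySize is 32768 bytes, so the 32769-byte key below must be rejected.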
-func TestBucket_Put_KeyTooLarge(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		err := tx.Bucket([]byte("widgets")).Put(make([]byte, 32769), []byte("bar"))
-		equals(t, err, bolt.ErrKeyTooLarge)
-		return nil
-	})
-}
-
-// Ensure that an error is returned when inserting a value that's too large.
-func TestBucket_Put_ValueTooLarge(t *testing.T) {
-	if os.Getenv("DRONE") == "true" {
-		t.Skip("not enough RAM for test")
-	}
-
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), make([]byte, bolt.MaxValueSize+1))
-		equals(t, err, bolt.ErrValueTooLarge)
-		return nil
-	})
-}
-
-// Ensure a bucket can calculate stats.
-func TestBucket_Stats(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-
-	// Add bucket with fewer keys but one big value.
-	big_key := []byte("really-big-value")
-	for i := 0; i < 500; i++ {
-		db.Update(func(tx *bolt.Tx) error {
-			b, _ := tx.CreateBucketIfNotExists([]byte("woojits"))
-			return b.Put([]byte(fmt.Sprintf("%03d", i)), []byte(strconv.Itoa(i)))
-		})
-	}
-	db.Update(func(tx *bolt.Tx) error {
-		b, _ := tx.CreateBucketIfNotExists([]byte("woojits"))
-		return b.Put(big_key, []byte(strings.Repeat("*", 10000)))
-	})
-
-	db.MustCheck()
-	db.View(func(tx *bolt.Tx) error {
-		b := tx.Bucket([]byte("woojits"))
-		stats := b.Stats()
-		equals(t, 1, stats.BranchPageN)
-		equals(t, 0, stats.BranchOverflowN)
-		equals(t, 7, stats.LeafPageN)
-		equals(t, 2, stats.LeafOverflowN)
-		equals(t, 501, stats.KeyN)
-		equals(t, 2, stats.Depth)
-
-		branchInuse := 16 // branch page header
-		branchInuse += 7 * 16 // branch elements
-		branchInuse += 7 * 3 // branch keys (7 3-byte keys)
-		equals(t, branchInuse, stats.BranchInuse)
-
-		leafInuse := 7 * 16 // leaf page headers
-		leafInuse += 501 * 16 // leaf elements
-		leafInuse += 500*3 + len(big_key) // leaf keys
-		leafInuse += 1*10 + 2*90 + 3*400 + 10000 // leaf values
-		equals(t, leafInuse, stats.LeafInuse)
-
-		if os.Getpagesize() == 4096 {
-			// Incompatible page size
-			equals(t, 4096, stats.BranchAlloc)
-			equals(t, 36864, stats.LeafAlloc)
-		}
-
-		equals(t, 1, stats.BucketN)
-		equals(t, 0, stats.InlineBucketN)
-		equals(t, 0, stats.InlineBucketInuse)
-		return nil
-	})
-}
-
-// Ensure a bucket with random insertion utilizes fill percentage correctly.
-func TestBucket_Stats_RandomFill(t *testing.T) {
-	if testing.Short() {
-		t.Skip("skipping test in short mode.")
-	} else if os.Getpagesize() != 4096 {
-		t.Skip("invalid page size for test")
-	}
-
-	db := NewTestDB()
-	defer db.Close()
-
-	// Add a set of values in random order. It will be the same random
-	// order so we can maintain consistency between test runs.
- var count int - r := rand.New(rand.NewSource(42)) - for _, i := range r.Perm(1000) { - db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("woojits")) - b.FillPercent = 0.9 - for _, j := range r.Perm(100) { - index := (j * 10000) + i - b.Put([]byte(fmt.Sprintf("%d000000000000000", index)), []byte("0000000000")) - count++ - } - return nil - }) - } - db.MustCheck() - - db.View(func(tx *bolt.Tx) error { - s := tx.Bucket([]byte("woojits")).Stats() - equals(t, 100000, s.KeyN) - - equals(t, 98, s.BranchPageN) - equals(t, 0, s.BranchOverflowN) - equals(t, 130984, s.BranchInuse) - equals(t, 401408, s.BranchAlloc) - - equals(t, 3412, s.LeafPageN) - equals(t, 0, s.LeafOverflowN) - equals(t, 4742482, s.LeafInuse) - equals(t, 13975552, s.LeafAlloc) - return nil - }) -} - -// Ensure a bucket can calculate stats. -func TestBucket_Stats_Small(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - // Add a bucket that fits on a single root leaf. - b, err := tx.CreateBucket([]byte("whozawhats")) - ok(t, err) - b.Put([]byte("foo"), []byte("bar")) - - return nil - }) - db.MustCheck() - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("whozawhats")) - stats := b.Stats() - equals(t, 0, stats.BranchPageN) - equals(t, 0, stats.BranchOverflowN) - equals(t, 0, stats.LeafPageN) - equals(t, 0, stats.LeafOverflowN) - equals(t, 1, stats.KeyN) - equals(t, 1, stats.Depth) - equals(t, 0, stats.BranchInuse) - equals(t, 0, stats.LeafInuse) - if os.Getpagesize() == 4096 { - // Incompatible page size - equals(t, 0, stats.BranchAlloc) - equals(t, 0, stats.LeafAlloc) - } - equals(t, 1, stats.BucketN) - equals(t, 1, stats.InlineBucketN) - equals(t, 16+16+6, stats.InlineBucketInuse) - return nil - }) -} - -func TestBucket_Stats_EmptyBucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - // Add a bucket that fits on a single root leaf. - _, err := tx.CreateBucket([]byte("whozawhats")) - ok(t, err) - return nil - }) - db.MustCheck() - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("whozawhats")) - stats := b.Stats() - equals(t, 0, stats.BranchPageN) - equals(t, 0, stats.BranchOverflowN) - equals(t, 0, stats.LeafPageN) - equals(t, 0, stats.LeafOverflowN) - equals(t, 0, stats.KeyN) - equals(t, 1, stats.Depth) - equals(t, 0, stats.BranchInuse) - equals(t, 0, stats.LeafInuse) - if os.Getpagesize() == 4096 { - // Incompatible page size - equals(t, 0, stats.BranchAlloc) - equals(t, 0, stats.LeafAlloc) - } - equals(t, 1, stats.BucketN) - equals(t, 1, stats.InlineBucketN) - equals(t, 16, stats.InlineBucketInuse) - return nil - }) -} - -// Ensure a bucket can calculate stats. 
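-// This variant exercises nested buckets, where the innermost bucket is small
-// enough to remain inline.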
-func TestBucket_Stats_Nested(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-
-	db.Update(func(tx *bolt.Tx) error {
-		b, err := tx.CreateBucket([]byte("foo"))
-		ok(t, err)
-		for i := 0; i < 100; i++ {
-			b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i)))
-		}
-		bar, err := b.CreateBucket([]byte("bar"))
-		ok(t, err)
-		for i := 0; i < 10; i++ {
-			bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))
-		}
-		baz, err := bar.CreateBucket([]byte("baz"))
-		ok(t, err)
-		for i := 0; i < 10; i++ {
-			baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))
-		}
-		return nil
-	})
-
-	db.MustCheck()
-
-	db.View(func(tx *bolt.Tx) error {
-		b := tx.Bucket([]byte("foo"))
-		stats := b.Stats()
-		equals(t, 0, stats.BranchPageN)
-		equals(t, 0, stats.BranchOverflowN)
-		equals(t, 2, stats.LeafPageN)
-		equals(t, 0, stats.LeafOverflowN)
-		equals(t, 122, stats.KeyN)
-		equals(t, 3, stats.Depth)
-		equals(t, 0, stats.BranchInuse)
-
-		foo := 16            // foo (pghdr)
-		foo += 101 * 16      // foo leaf elements
-		foo += 100*2 + 100*2 // foo leaf key/values
-		foo += 3 + 16        // foo -> bar key/value
-
-		bar := 16      // bar (pghdr)
-		bar += 11 * 16 // bar leaf elements
-		bar += 10 + 10 // bar leaf key/values
-		bar += 3 + 16  // bar -> baz key/value
-
-		baz := 16      // baz (inline) (pghdr)
-		baz += 10 * 16 // baz leaf elements
-		baz += 10 + 10 // baz leaf key/values
-
-		equals(t, foo+bar+baz, stats.LeafInuse)
-		if os.Getpagesize() == 4096 {
-			// These allocation totals assume a 4KB page size.
-			equals(t, 0, stats.BranchAlloc)
-			equals(t, 8192, stats.LeafAlloc)
-		}
-		equals(t, 3, stats.BucketN)
-		equals(t, 1, stats.InlineBucketN)
-		equals(t, baz, stats.InlineBucketInuse)
-		return nil
-	})
-}
-
-// Ensure a large bucket can calculate stats.
-func TestBucket_Stats_Large(t *testing.T) {
-	if testing.Short() {
-		t.Skip("skipping test in short mode.")
-	}
-
-	db := NewTestDB()
-	defer db.Close()
-
-	var index int
-	for i := 0; i < 100; i++ {
-		db.Update(func(tx *bolt.Tx) error {
-			// Add bucket with lots of keys.
-			b, _ := tx.CreateBucketIfNotExists([]byte("widgets"))
-			for j := 0; j < 1000; j++ {
-				b.Put([]byte(strconv.Itoa(index)), []byte(strconv.Itoa(index)))
-				index++
-			}
-			return nil
-		})
-	}
-	db.MustCheck()
-
-	db.View(func(tx *bolt.Tx) error {
-		b := tx.Bucket([]byte("widgets"))
-		stats := b.Stats()
-		equals(t, 13, stats.BranchPageN)
-		equals(t, 0, stats.BranchOverflowN)
-		equals(t, 1196, stats.LeafPageN)
-		equals(t, 0, stats.LeafOverflowN)
-		equals(t, 100000, stats.KeyN)
-		equals(t, 3, stats.Depth)
-		equals(t, 25257, stats.BranchInuse)
-		equals(t, 2596916, stats.LeafInuse)
-		if os.Getpagesize() == 4096 {
-			// These allocation totals assume a 4KB page size.
-			equals(t, 53248, stats.BranchAlloc)
-			equals(t, 4898816, stats.LeafAlloc)
-		}
-		equals(t, 1, stats.BucketN)
-		equals(t, 0, stats.InlineBucketN)
-		equals(t, 0, stats.InlineBucketInuse)
-		return nil
-	})
-}
-
-// Ensure that a bucket can write random keys and values across multiple transactions.
-func TestBucket_Put_Single(t *testing.T) {
-	if testing.Short() {
-		t.Skip("skipping test in short mode.")
-	}
-
-	index := 0
-	f := func(items testdata) bool {
-		db := NewTestDB()
-		defer db.Close()
-
-		m := make(map[string][]byte)
-
-		db.Update(func(tx *bolt.Tx) error {
-			_, err := tx.CreateBucket([]byte("widgets"))
-			return err
-		})
-		for _, item := range items {
-			db.Update(func(tx *bolt.Tx) error {
-				if err := tx.Bucket([]byte("widgets")).Put(item.Key, item.Value); err != nil {
-					panic("put error: " + err.Error())
-				}
-				m[string(item.Key)] = item.Value
-				return nil
-			})
-
-			// Verify all key/values so far.
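The verification step that follows re-reads every key written so far inside a read-only transaction. The core read-back idiom it relies on looks like this (a minimal sketch; the path and bucket names are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("/tmp/readback-demo.db", 0666, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Write under a read-write transaction...
	db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("foo"), []byte("bar"))
	})

	// ...then verify under a read-only transaction.
	db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
		fmt.Printf("foo=%s\n", v) // foo=bar
		return nil
	})
}
```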
-			db.View(func(tx *bolt.Tx) error {
-				i := 0
-				for k, v := range m {
-					value := tx.Bucket([]byte("widgets")).Get([]byte(k))
-					if !bytes.Equal(value, v) {
-						t.Logf("value mismatch [run %d] (%d of %d):\nkey: %x\ngot: %x\nexp: %x", index, i, len(m), []byte(k), value, v)
-						db.CopyTempFile()
-						t.FailNow()
-					}
-					i++
-				}
-				return nil
-			})
-		}
-
-		index++
-		return true
-	}
-	if err := quick.Check(f, qconfig()); err != nil {
-		t.Error(err)
-	}
-}
-
-// Ensure that a transaction can insert multiple key/value pairs at once.
-func TestBucket_Put_Multiple(t *testing.T) {
-	if testing.Short() {
-		t.Skip("skipping test in short mode.")
-	}
-
-	f := func(items testdata) bool {
-		db := NewTestDB()
-		defer db.Close()
-		// Bulk insert all values.
-		db.Update(func(tx *bolt.Tx) error {
-			_, err := tx.CreateBucket([]byte("widgets"))
-			return err
-		})
-		err := db.Update(func(tx *bolt.Tx) error {
-			b := tx.Bucket([]byte("widgets"))
-			for _, item := range items {
-				ok(t, b.Put(item.Key, item.Value))
-			}
-			return nil
-		})
-		ok(t, err)
-
-		// Verify all items exist.
-		db.View(func(tx *bolt.Tx) error {
-			b := tx.Bucket([]byte("widgets"))
-			for _, item := range items {
-				value := b.Get(item.Key)
-				if !bytes.Equal(item.Value, value) {
-					db.CopyTempFile()
-					t.Fatalf("exp=%x; got=%x", item.Value, value)
-				}
-			}
-			return nil
-		})
-		return true
-	}
-	if err := quick.Check(f, qconfig()); err != nil {
-		t.Error(err)
-	}
-}
-
-// Ensure that a transaction can delete all key/value pairs and return to a single leaf page.
-func TestBucket_Delete_Quick(t *testing.T) {
-	if testing.Short() {
-		t.Skip("skipping test in short mode.")
-	}
-
-	f := func(items testdata) bool {
-		db := NewTestDB()
-		defer db.Close()
-		// Bulk insert all values.
-		db.Update(func(tx *bolt.Tx) error {
-			_, err := tx.CreateBucket([]byte("widgets"))
-			return err
-		})
-		err := db.Update(func(tx *bolt.Tx) error {
-			b := tx.Bucket([]byte("widgets"))
-			for _, item := range items {
-				ok(t, b.Put(item.Key, item.Value))
-			}
-			return nil
-		})
-		ok(t, err)
-
-		// Remove items one at a time and check consistency.
-		for _, item := range items {
-			err := db.Update(func(tx *bolt.Tx) error {
-				return tx.Bucket([]byte("widgets")).Delete(item.Key)
-			})
-			ok(t, err)
-		}
-
-		// The bucket should now be empty.
-		db.View(func(tx *bolt.Tx) error {
-			tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
-				t.Fatalf("bucket should be empty; found: %06x", trunc(k, 3))
-				return nil
-			})
-			return nil
-		})
-		return true
-	}
-	if err := quick.Check(f, qconfig()); err != nil {
-		t.Error(err)
-	}
-}
-
-func ExampleBucket_Put() {
-	// Open the database.
-	db, _ := bolt.Open(tempfile(), 0666, nil)
-	defer os.Remove(db.Path())
-	defer db.Close()
-
-	// Start a write transaction.
-	db.Update(func(tx *bolt.Tx) error {
-		// Create a bucket.
-		tx.CreateBucket([]byte("widgets"))
-
-		// Set the value "bar" for the key "foo".
-		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
-		return nil
-	})
-
-	// Read value back in a different read-only transaction.
-	db.View(func(tx *bolt.Tx) error {
-		value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
-		fmt.Printf("The value of 'foo' is: %s\n", value)
-		return nil
-	})
-
-	// Output:
-	// The value of 'foo' is: bar
-}
-
-func ExampleBucket_Delete() {
-	// Open the database.
-	db, _ := bolt.Open(tempfile(), 0666, nil)
-	defer os.Remove(db.Path())
-	defer db.Close()
-
-	// Start a write transaction.
-	db.Update(func(tx *bolt.Tx) error {
-		// Create a bucket.
-		tx.CreateBucket([]byte("widgets"))
-		b := tx.Bucket([]byte("widgets"))
-
-		// Set the value "bar" for the key "foo".
-		b.Put([]byte("foo"), []byte("bar"))
-
-		// Retrieve the key back from the database and verify it.
-		value := b.Get([]byte("foo"))
-		fmt.Printf("The value of 'foo' was: %s\n", value)
-		return nil
-	})
-
-	// Delete the key in a different write transaction.
-	db.Update(func(tx *bolt.Tx) error {
-		return tx.Bucket([]byte("widgets")).Delete([]byte("foo"))
-	})
-
-	// Retrieve the key again.
-	db.View(func(tx *bolt.Tx) error {
-		value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
-		if value == nil {
-			fmt.Printf("The value of 'foo' is now: nil\n")
-		}
-		return nil
-	})
-
-	// Output:
-	// The value of 'foo' was: bar
-	// The value of 'foo' is now: nil
-}
-
-func ExampleBucket_ForEach() {
-	// Open the database.
-	db, _ := bolt.Open(tempfile(), 0666, nil)
-	defer os.Remove(db.Path())
-	defer db.Close()
-
-	// Insert data into a bucket.
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("animals"))
-		b := tx.Bucket([]byte("animals"))
-		b.Put([]byte("dog"), []byte("fun"))
-		b.Put([]byte("cat"), []byte("lame"))
-		b.Put([]byte("liger"), []byte("awesome"))
-
-		// Iterate over items in sorted key order.
-		b.ForEach(func(k, v []byte) error {
-			fmt.Printf("A %s is %s.\n", k, v)
-			return nil
-		})
-		return nil
-	})
-
-	// Output:
-	// A cat is lame.
-	// A dog is fun.
-	// A liger is awesome.
-}
diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go b/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go
deleted file mode 100644
index c41ebe404..000000000
--- a/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go
+++ /dev/null
@@ -1,1529 +0,0 @@
-package main
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"flag"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"math/rand"
-	"os"
-	"runtime"
-	"runtime/pprof"
-	"strconv"
-	"strings"
-	"time"
-	"unicode"
-	"unicode/utf8"
-	"unsafe"
-
-	"github.com/boltdb/bolt"
-)
-
-var (
-	// ErrUsage is returned when a usage message was printed and the process
-	// should simply exit with an error.
-	ErrUsage = errors.New("usage")
-
-	// ErrUnknownCommand is returned when a CLI command is not recognized.
-	ErrUnknownCommand = errors.New("unknown command")
-
-	// ErrPathRequired is returned when the path to a Bolt database is not specified.
-	ErrPathRequired = errors.New("path required")
-
-	// ErrFileNotFound is returned when a Bolt database does not exist.
-	ErrFileNotFound = errors.New("file not found")
-
-	// ErrInvalidValue is returned when a benchmark reads an unexpected value.
-	ErrInvalidValue = errors.New("invalid value")
-
-	// ErrCorrupt is returned when checking a data file finds errors.
-	ErrCorrupt = errors.New("corrupt database")
-
-	// ErrNonDivisibleBatchSize is returned when the iteration count can't be
-	// evenly divided by the batch size.
-	ErrNonDivisibleBatchSize = errors.New("number of iterations must be divisible by the batch size")
-
-	// ErrPageIDRequired is returned when a required page id is not specified.
-	ErrPageIDRequired = errors.New("page id required")
-
-	// ErrPageNotFound is returned when specifying a page above the high water mark.
-	ErrPageNotFound = errors.New("page not found")
-
-	// ErrPageFreed is returned when reading a page that has already been freed.
-	ErrPageFreed = errors.New("page freed")
-)
-
-// PageHeaderSize represents the size of the bolt.page header.
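The constant declared just below is 16 bytes, which falls out of the layout of bolt's page struct (a copy of it appears near the bottom of this file). A quick stdlib check of that arithmetic, assuming a 64-bit platform (the struct here simply mirrors the copy below):

```go
package main

import (
	"fmt"
	"unsafe"
)

// Mirrors the page struct copied at the bottom of main.go.
type page struct {
	id       uint64
	flags    uint16
	count    uint16
	overflow uint32
	ptr      uintptr
}

func main() {
	// The header is everything before the data pointer:
	// 8 (id) + 2 (flags) + 2 (count) + 4 (overflow) = 16 bytes.
	fmt.Println(unsafe.Offsetof(page{}.ptr)) // 16 on 64-bit platforms
}
```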
-const PageHeaderSize = 16
-
-func main() {
-	m := NewMain()
-	if err := m.Run(os.Args[1:]...); err == ErrUsage {
-		os.Exit(2)
-	} else if err != nil {
-		fmt.Println(err.Error())
-		os.Exit(1)
-	}
-}
-
-// Main represents the main program execution.
-type Main struct {
-	Stdin  io.Reader
-	Stdout io.Writer
-	Stderr io.Writer
-}
-
-// NewMain returns a new instance of Main connected to the standard input/output.
-func NewMain() *Main {
-	return &Main{
-		Stdin:  os.Stdin,
-		Stdout: os.Stdout,
-		Stderr: os.Stderr,
-	}
-}
-
-// Run executes the program.
-func (m *Main) Run(args ...string) error {
-	// Require a command at the beginning.
-	if len(args) == 0 || strings.HasPrefix(args[0], "-") {
-		fmt.Fprintln(m.Stderr, m.Usage())
-		return ErrUsage
-	}
-
-	// Execute command.
-	switch args[0] {
-	case "help":
-		fmt.Fprintln(m.Stderr, m.Usage())
-		return ErrUsage
-	case "bench":
-		return newBenchCommand(m).Run(args[1:]...)
-	case "check":
-		return newCheckCommand(m).Run(args[1:]...)
-	case "dump":
-		return newDumpCommand(m).Run(args[1:]...)
-	case "info":
-		return newInfoCommand(m).Run(args[1:]...)
-	case "page":
-		return newPageCommand(m).Run(args[1:]...)
-	case "pages":
-		return newPagesCommand(m).Run(args[1:]...)
-	case "stats":
-		return newStatsCommand(m).Run(args[1:]...)
-	default:
-		return ErrUnknownCommand
-	}
-}
-
-// Usage returns the help message.
-func (m *Main) Usage() string {
-	return strings.TrimLeft(`
-Bolt is a tool for inspecting bolt databases.
-
-Usage:
-
-	bolt command [arguments]
-
-The commands are:
-
-	bench       run synthetic benchmark against bolt
-	check       verify integrity of bolt database
-	dump        print a hexadecimal dump of one or more pages
-	info        print basic info
-	help        print this screen
-	page        print one or more pages in human readable format
-	pages       print list of pages with their types
-	stats       iterate over all pages and generate usage stats
-
-Use "bolt [command] -h" for more information about a command.
-`, "\n")
-}
-
-// CheckCommand represents the "check" command execution.
-type CheckCommand struct {
-	Stdin  io.Reader
-	Stdout io.Writer
-	Stderr io.Writer
-}
-
-// newCheckCommand returns a CheckCommand.
-func newCheckCommand(m *Main) *CheckCommand {
-	return &CheckCommand{
-		Stdin:  m.Stdin,
-		Stdout: m.Stdout,
-		Stderr: m.Stderr,
-	}
-}
-
-// Run executes the command.
-func (cmd *CheckCommand) Run(args ...string) error {
-	// Parse flags.
-	fs := flag.NewFlagSet("", flag.ContinueOnError)
-	help := fs.Bool("h", false, "")
-	if err := fs.Parse(args); err != nil {
-		return err
-	} else if *help {
-		fmt.Fprintln(cmd.Stderr, cmd.Usage())
-		return ErrUsage
-	}
-
-	// Require database path.
-	path := fs.Arg(0)
-	if path == "" {
-		return ErrPathRequired
-	} else if _, err := os.Stat(path); os.IsNotExist(err) {
-		return ErrFileNotFound
-	}
-
-	// Open database.
-	db, err := bolt.Open(path, 0666, nil)
-	if err != nil {
-		return err
-	}
-	defer db.Close()
-
-	// Perform consistency check.
-	return db.View(func(tx *bolt.Tx) error {
-		var count int
-		ch := tx.Check()
-	loop:
-		for {
-			select {
-			case err, ok := <-ch:
-				if !ok {
-					break loop
-				}
-				fmt.Fprintln(cmd.Stdout, err)
-				count++
-			}
-		}
-
-		// Print summary of errors.
-		if count > 0 {
-			fmt.Fprintf(cmd.Stdout, "%d errors found\n", count)
-			return ErrCorrupt
-		}
-
-		// Notify user that database is valid.
-		fmt.Fprintln(cmd.Stdout, "OK")
-		return nil
-	})
-}
-
-// Usage returns the help message.
-func (cmd *CheckCommand) Usage() string {
-	return strings.TrimLeft(`
-usage: bolt check PATH
-
-Check opens a database at PATH and runs an exhaustive check to verify that
-all pages are accessible or are marked as freed. It also verifies that no
-pages are double referenced.
-
-Verification errors will stream out as they are found and the process will
-return after all pages have been checked.
-`, "\n")
-}
-
-// InfoCommand represents the "info" command execution.
-type InfoCommand struct {
-	Stdin  io.Reader
-	Stdout io.Writer
-	Stderr io.Writer
-}
-
-// newInfoCommand returns an InfoCommand.
-func newInfoCommand(m *Main) *InfoCommand {
-	return &InfoCommand{
-		Stdin:  m.Stdin,
-		Stdout: m.Stdout,
-		Stderr: m.Stderr,
-	}
-}
-
-// Run executes the command.
-func (cmd *InfoCommand) Run(args ...string) error {
-	// Parse flags.
-	fs := flag.NewFlagSet("", flag.ContinueOnError)
-	help := fs.Bool("h", false, "")
-	if err := fs.Parse(args); err != nil {
-		return err
-	} else if *help {
-		fmt.Fprintln(cmd.Stderr, cmd.Usage())
-		return ErrUsage
-	}
-
-	// Require database path.
-	path := fs.Arg(0)
-	if path == "" {
-		return ErrPathRequired
-	} else if _, err := os.Stat(path); os.IsNotExist(err) {
-		return ErrFileNotFound
-	}
-
-	// Open the database.
-	db, err := bolt.Open(path, 0666, nil)
-	if err != nil {
-		return err
-	}
-	defer db.Close()
-
-	// Print basic database info.
-	info := db.Info()
-	fmt.Fprintf(cmd.Stdout, "Page Size: %d\n", info.PageSize)
-
-	return nil
-}
-
-// Usage returns the help message.
-func (cmd *InfoCommand) Usage() string {
-	return strings.TrimLeft(`
-usage: bolt info PATH
-
-Info prints basic information about the Bolt database at PATH.
-`, "\n")
-}
-
-// DumpCommand represents the "dump" command execution.
-type DumpCommand struct {
-	Stdin  io.Reader
-	Stdout io.Writer
-	Stderr io.Writer
-}
-
-// newDumpCommand returns a DumpCommand.
-func newDumpCommand(m *Main) *DumpCommand {
-	return &DumpCommand{
-		Stdin:  m.Stdin,
-		Stdout: m.Stdout,
-		Stderr: m.Stderr,
-	}
-}
-
-// Run executes the command.
-func (cmd *DumpCommand) Run(args ...string) error {
-	// Parse flags.
-	fs := flag.NewFlagSet("", flag.ContinueOnError)
-	help := fs.Bool("h", false, "")
-	if err := fs.Parse(args); err != nil {
-		return err
-	} else if *help {
-		fmt.Fprintln(cmd.Stderr, cmd.Usage())
-		return ErrUsage
-	}
-
-	// Require database path and page id.
-	path := fs.Arg(0)
-	if path == "" {
-		return ErrPathRequired
-	} else if _, err := os.Stat(path); os.IsNotExist(err) {
-		return ErrFileNotFound
-	}
-
-	// Read page ids.
-	pageIDs, err := atois(fs.Args()[1:])
-	if err != nil {
-		return err
-	} else if len(pageIDs) == 0 {
-		return ErrPageIDRequired
-	}
-
-	// Open database to retrieve page size.
-	pageSize, err := ReadPageSize(path)
-	if err != nil {
-		return err
-	}
-
-	// Open database file handler.
-	f, err := os.Open(path)
-	if err != nil {
-		return err
-	}
-	defer func() { _ = f.Close() }()
-
-	// Print each page listed.
-	for i, pageID := range pageIDs {
-		// Print a separator.
-		if i > 0 {
-			fmt.Fprintln(cmd.Stdout, "===============================================")
-		}
-
-		// Print page to stdout.
-		if err := cmd.PrintPage(cmd.Stdout, f, pageID, pageSize); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// PrintPage prints a given page as hexadecimal.
-func (cmd *DumpCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error {
-	const bytesPerLineN = 16
-
-	// Read page into buffer.
-	buf := make([]byte, pageSize)
-	addr := pageID * pageSize
-	if n, err := r.ReadAt(buf, int64(addr)); err != nil {
-		return err
-	} else if n != pageSize {
-		return io.ErrUnexpectedEOF
-	}
-
-	// Write out to writer in 16-byte lines.
-	var prev []byte
-	var skipped bool
-	for offset := 0; offset < pageSize; offset += bytesPerLineN {
-		// Retrieve current 16-byte line.
-		line := buf[offset : offset+bytesPerLineN]
-		isLastLine := (offset == (pageSize - bytesPerLineN))
-
-		// If it's the same as the previous line then print a skip.
-		if bytes.Equal(line, prev) && !isLastLine {
-			if !skipped {
-				fmt.Fprintf(w, "%07x *\n", addr+offset)
-				skipped = true
-			}
-		} else {
-			// Print line as hexadecimal in 2-byte groups.
-			fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset,
-				line[0:2], line[2:4], line[4:6], line[6:8],
-				line[8:10], line[10:12], line[12:14], line[14:16],
-			)
-
-			skipped = false
-		}
-
-		// Save the previous line.
-		prev = line
-	}
-	fmt.Fprint(w, "\n")
-
-	return nil
-}
-
-// Usage returns the help message.
-func (cmd *DumpCommand) Usage() string {
-	return strings.TrimLeft(`
-usage: bolt dump PATH pageid [pageid...]
-
-Dump prints a hexadecimal dump of one or more pages.
-`, "\n")
-}
-
-// PageCommand represents the "page" command execution.
-type PageCommand struct {
-	Stdin  io.Reader
-	Stdout io.Writer
-	Stderr io.Writer
-}
-
-// newPageCommand returns a PageCommand.
-func newPageCommand(m *Main) *PageCommand {
-	return &PageCommand{
-		Stdin:  m.Stdin,
-		Stdout: m.Stdout,
-		Stderr: m.Stderr,
-	}
-}
-
-// Run executes the command.
-func (cmd *PageCommand) Run(args ...string) error {
-	// Parse flags.
-	fs := flag.NewFlagSet("", flag.ContinueOnError)
-	help := fs.Bool("h", false, "")
-	if err := fs.Parse(args); err != nil {
-		return err
-	} else if *help {
-		fmt.Fprintln(cmd.Stderr, cmd.Usage())
-		return ErrUsage
-	}
-
-	// Require database path and page id.
-	path := fs.Arg(0)
-	if path == "" {
-		return ErrPathRequired
-	} else if _, err := os.Stat(path); os.IsNotExist(err) {
-		return ErrFileNotFound
-	}
-
-	// Read page ids.
-	pageIDs, err := atois(fs.Args()[1:])
-	if err != nil {
-		return err
-	} else if len(pageIDs) == 0 {
-		return ErrPageIDRequired
-	}
-
-	// Open database file handler.
-	f, err := os.Open(path)
-	if err != nil {
-		return err
-	}
-	defer func() { _ = f.Close() }()
-
-	// Print each page listed.
-	for i, pageID := range pageIDs {
-		// Print a separator.
-		if i > 0 {
-			fmt.Fprintln(cmd.Stdout, "===============================================")
-		}
-
-		// Retrieve page info and page size.
-		p, buf, err := ReadPage(path, pageID)
-		if err != nil {
-			return err
-		}
-
-		// Print basic page info.
-		fmt.Fprintf(cmd.Stdout, "Page ID: %d\n", p.id)
-		fmt.Fprintf(cmd.Stdout, "Page Type: %s\n", p.Type())
-		fmt.Fprintf(cmd.Stdout, "Total Size: %d bytes\n", len(buf))
-
-		// Print type-specific data.
-		switch p.Type() {
-		case "meta":
-			err = cmd.PrintMeta(cmd.Stdout, buf)
-		case "leaf":
-			err = cmd.PrintLeaf(cmd.Stdout, buf)
-		case "branch":
-			err = cmd.PrintBranch(cmd.Stdout, buf)
-		case "freelist":
-			err = cmd.PrintFreelist(cmd.Stdout, buf)
-		}
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// PrintMeta prints the data from the meta page.
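PrintMeta, which follows, reinterprets the raw page buffer as a meta struct through `unsafe.Pointer` rather than decoding field by field. The same zero-copy cast in miniature (the header type and magic value here are hypothetical, and the trick assumes the buffer was written in the machine's native byte order, little-endian below):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// hdr is a hypothetical stand-in for bolt's meta struct.
type hdr struct {
	magic   uint32
	version uint32
}

func main() {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint32(buf[0:4], 0xDEADBEEF) // hypothetical magic
	binary.LittleEndian.PutUint32(buf[4:8], 2)

	// Zero-copy reinterpretation, as PrintMeta does with &buf[PageHeaderSize].
	h := (*hdr)(unsafe.Pointer(&buf[0]))
	fmt.Printf("magic=%08x version=%d\n", h.magic, h.version)
}
```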
-func (cmd *PageCommand) PrintMeta(w io.Writer, buf []byte) error {
-	m := (*meta)(unsafe.Pointer(&buf[PageHeaderSize]))
-	fmt.Fprintf(w, "Version: %d\n", m.version)
-	fmt.Fprintf(w, "Page Size: %d bytes\n", m.pageSize)
-	fmt.Fprintf(w, "Flags: %08x\n", m.flags)
-	fmt.Fprintf(w, "Root: <pgid=%d>\n", m.root.root)
-	fmt.Fprintf(w, "Freelist: <pgid=%d>\n", m.freelist)
-	fmt.Fprintf(w, "HWM: <pgid=%d>\n", m.pgid)
-	fmt.Fprintf(w, "Txn ID: %d\n", m.txid)
-	fmt.Fprintf(w, "Checksum: %016x\n", m.checksum)
-	fmt.Fprintf(w, "\n")
-	return nil
-}
-
-// PrintLeaf prints the data for a leaf page.
-func (cmd *PageCommand) PrintLeaf(w io.Writer, buf []byte) error {
-	p := (*page)(unsafe.Pointer(&buf[0]))
-
-	// Print number of items.
-	fmt.Fprintf(w, "Item Count: %d\n", p.count)
-	fmt.Fprintf(w, "\n")
-
-	// Print each key/value.
-	for i := uint16(0); i < p.count; i++ {
-		e := p.leafPageElement(i)
-
-		// Format key as string.
-		var k string
-		if isPrintable(string(e.key())) {
-			k = fmt.Sprintf("%q", string(e.key()))
-		} else {
-			k = fmt.Sprintf("%x", string(e.key()))
-		}
-
-		// Format value as string.
-		var v string
-		if (e.flags & uint32(bucketLeafFlag)) != 0 {
-			b := (*bucket)(unsafe.Pointer(&e.value()[0]))
-			v = fmt.Sprintf("<pgid=%d,seq=%d>", b.root, b.sequence)
-		} else if isPrintable(string(e.value())) {
-			v = fmt.Sprintf("%q", string(e.value()))
-		} else {
-			v = fmt.Sprintf("%x", string(e.value()))
-		}
-
-		fmt.Fprintf(w, "%s: %s\n", k, v)
-	}
-	fmt.Fprintf(w, "\n")
-	return nil
-}
-
-// PrintBranch prints the data for a branch page.
-func (cmd *PageCommand) PrintBranch(w io.Writer, buf []byte) error {
-	p := (*page)(unsafe.Pointer(&buf[0]))
-
-	// Print number of items.
-	fmt.Fprintf(w, "Item Count: %d\n", p.count)
-	fmt.Fprintf(w, "\n")
-
-	// Print each key/value.
-	for i := uint16(0); i < p.count; i++ {
-		e := p.branchPageElement(i)
-
-		// Format key as string.
-		var k string
-		if isPrintable(string(e.key())) {
-			k = fmt.Sprintf("%q", string(e.key()))
-		} else {
-			k = fmt.Sprintf("%x", string(e.key()))
-		}
-
-		fmt.Fprintf(w, "%s: <pgid=%d>\n", k, e.pgid)
-	}
-	fmt.Fprintf(w, "\n")
-	return nil
-}
-
-// PrintFreelist prints the data for a freelist page.
-func (cmd *PageCommand) PrintFreelist(w io.Writer, buf []byte) error {
-	p := (*page)(unsafe.Pointer(&buf[0]))
-
-	// Print number of items.
-	fmt.Fprintf(w, "Item Count: %d\n", p.count)
-	fmt.Fprintf(w, "\n")
-
-	// Print each page in the freelist.
-	ids := (*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr))
-	for i := uint16(0); i < p.count; i++ {
-		fmt.Fprintf(w, "%d\n", ids[i])
-	}
-	fmt.Fprintf(w, "\n")
-	return nil
-}
-
-// PrintPage prints a given page as hexadecimal.
-func (cmd *PageCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error {
-	const bytesPerLineN = 16
-
-	// Read page into buffer.
-	buf := make([]byte, pageSize)
-	addr := pageID * pageSize
-	if n, err := r.ReadAt(buf, int64(addr)); err != nil {
-		return err
-	} else if n != pageSize {
-		return io.ErrUnexpectedEOF
-	}
-
-	// Write out to writer in 16-byte lines.
-	var prev []byte
-	var skipped bool
-	for offset := 0; offset < pageSize; offset += bytesPerLineN {
-		// Retrieve current 16-byte line.
-		line := buf[offset : offset+bytesPerLineN]
-		isLastLine := (offset == (pageSize - bytesPerLineN))
-
-		// If it's the same as the previous line then print a skip.
-		if bytes.Equal(line, prev) && !isLastLine {
-			if !skipped {
-				fmt.Fprintf(w, "%07x *\n", addr+offset)
-				skipped = true
-			}
-		} else {
-			// Print line as hexadecimal in 2-byte groups.
-			fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset,
-				line[0:2], line[2:4], line[4:6], line[6:8],
-				line[8:10], line[10:12], line[12:14], line[14:16],
-			)
-
-			skipped = false
-		}
-
-		// Save the previous line.
-		prev = line
-	}
-	fmt.Fprint(w, "\n")
-
-	return nil
-}
-
-// Usage returns the help message.
-func (cmd *PageCommand) Usage() string {
-	return strings.TrimLeft(`
-usage: bolt page PATH pageid [pageid...]
-
-Page prints one or more pages in human readable format.
-`, "\n")
-}
-
-// PagesCommand represents the "pages" command execution.
-type PagesCommand struct {
-	Stdin  io.Reader
-	Stdout io.Writer
-	Stderr io.Writer
-}
-
-// newPagesCommand returns a PagesCommand.
-func newPagesCommand(m *Main) *PagesCommand {
-	return &PagesCommand{
-		Stdin:  m.Stdin,
-		Stdout: m.Stdout,
-		Stderr: m.Stderr,
-	}
-}
-
-// Run executes the command.
-func (cmd *PagesCommand) Run(args ...string) error {
-	// Parse flags.
-	fs := flag.NewFlagSet("", flag.ContinueOnError)
-	help := fs.Bool("h", false, "")
-	if err := fs.Parse(args); err != nil {
-		return err
-	} else if *help {
-		fmt.Fprintln(cmd.Stderr, cmd.Usage())
-		return ErrUsage
-	}
-
-	// Require database path.
-	path := fs.Arg(0)
-	if path == "" {
-		return ErrPathRequired
-	} else if _, err := os.Stat(path); os.IsNotExist(err) {
-		return ErrFileNotFound
-	}
-
-	// Open database.
-	db, err := bolt.Open(path, 0666, nil)
-	if err != nil {
-		return err
-	}
-	defer func() { _ = db.Close() }()
-
-	// Write header.
-	fmt.Fprintln(cmd.Stdout, "ID       TYPE       ITEMS  OVRFLW")
-	fmt.Fprintln(cmd.Stdout, "======== ========== ====== ======")
-
-	return db.Update(func(tx *bolt.Tx) error {
-		var id int
-		for {
-			p, err := tx.Page(id)
-			if err != nil {
-				return &PageError{ID: id, Err: err}
-			} else if p == nil {
-				break
-			}
-
-			// Only display count and overflow if this is a non-free page.
-			var count, overflow string
-			if p.Type != "free" {
-				count = strconv.Itoa(p.Count)
-				if p.OverflowCount > 0 {
-					overflow = strconv.Itoa(p.OverflowCount)
-				}
-			}
-
-			// Print table row.
-			fmt.Fprintf(cmd.Stdout, "%-8d %-10s %-6s %-6s\n", p.ID, p.Type, count, overflow)
-
-			// Move to the next non-overflow page.
-			id++
-			if p.Type != "free" {
-				id += p.OverflowCount
-			}
-		}
-		return nil
-	})
-}
-
-// Usage returns the help message.
-func (cmd *PagesCommand) Usage() string {
-	return strings.TrimLeft(`
-usage: bolt pages PATH
-
-Pages prints a table of pages with their type (meta, leaf, branch, freelist).
-Leaf and branch pages will show a key count in the "items" column while the
-freelist will show the number of free pages in the "items" column.
-
-The "overflow" column shows the number of blocks that the page spills over
-into. Normally there is no overflow but large keys and values can cause
-a single page to take up multiple blocks.
-`, "\n")
-}
-
-// StatsCommand represents the "stats" command execution.
-type StatsCommand struct {
-	Stdin  io.Reader
-	Stdout io.Writer
-	Stderr io.Writer
-}
-
-// newStatsCommand returns a StatsCommand.
-func newStatsCommand(m *Main) *StatsCommand {
-	return &StatsCommand{
-		Stdin:  m.Stdin,
-		Stdout: m.Stdout,
-		Stderr: m.Stderr,
-	}
-}
-
-// Run executes the command.
-func (cmd *StatsCommand) Run(args ...string) error {
-	// Parse flags.
-	fs := flag.NewFlagSet("", flag.ContinueOnError)
-	help := fs.Bool("h", false, "")
-	if err := fs.Parse(args); err != nil {
-		return err
-	} else if *help {
-		fmt.Fprintln(cmd.Stderr, cmd.Usage())
-		return ErrUsage
-	}
-
-	// Require database path; an optional bucket-name prefix may follow.
-	path, prefix := fs.Arg(0), fs.Arg(1)
-	if path == "" {
-		return ErrPathRequired
-	} else if _, err := os.Stat(path); os.IsNotExist(err) {
-		return ErrFileNotFound
-	}
-
-	// Open database.
-	db, err := bolt.Open(path, 0666, nil)
-	if err != nil {
-		return err
-	}
-	defer db.Close()
-
-	return db.View(func(tx *bolt.Tx) error {
-		var s bolt.BucketStats
-		var count int
-		if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error {
-			if bytes.HasPrefix(name, []byte(prefix)) {
-				s.Add(b.Stats())
-				count++
-			}
-			return nil
-		}); err != nil {
-			return err
-		}
-
-		fmt.Fprintf(cmd.Stdout, "Aggregate statistics for %d buckets\n\n", count)
-
-		fmt.Fprintln(cmd.Stdout, "Page count statistics")
-		fmt.Fprintf(cmd.Stdout, "\tNumber of logical branch pages: %d\n", s.BranchPageN)
-		fmt.Fprintf(cmd.Stdout, "\tNumber of physical branch overflow pages: %d\n", s.BranchOverflowN)
-		fmt.Fprintf(cmd.Stdout, "\tNumber of logical leaf pages: %d\n", s.LeafPageN)
-		fmt.Fprintf(cmd.Stdout, "\tNumber of physical leaf overflow pages: %d\n", s.LeafOverflowN)
-
-		fmt.Fprintln(cmd.Stdout, "Tree statistics")
-		fmt.Fprintf(cmd.Stdout, "\tNumber of key/value pairs: %d\n", s.KeyN)
-		fmt.Fprintf(cmd.Stdout, "\tNumber of levels in B+tree: %d\n", s.Depth)
-
-		fmt.Fprintln(cmd.Stdout, "Page size utilization")
-		fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical branch pages: %d\n", s.BranchAlloc)
-		var percentage int
-		if s.BranchAlloc != 0 {
-			percentage = int(float32(s.BranchInuse) * 100.0 / float32(s.BranchAlloc))
-		}
-		fmt.Fprintf(cmd.Stdout, "\tBytes actually used for branch data: %d (%d%%)\n", s.BranchInuse, percentage)
-		fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical leaf pages: %d\n", s.LeafAlloc)
-		percentage = 0
-		if s.LeafAlloc != 0 {
-			percentage = int(float32(s.LeafInuse) * 100.0 / float32(s.LeafAlloc))
-		}
-		fmt.Fprintf(cmd.Stdout, "\tBytes actually used for leaf data: %d (%d%%)\n", s.LeafInuse, percentage)
-
-		fmt.Fprintln(cmd.Stdout, "Bucket statistics")
-		fmt.Fprintf(cmd.Stdout, "\tTotal number of buckets: %d\n", s.BucketN)
-		percentage = 0
-		if s.BucketN != 0 {
-			percentage = int(float32(s.InlineBucketN) * 100.0 / float32(s.BucketN))
-		}
-		fmt.Fprintf(cmd.Stdout, "\tTotal number of inlined buckets: %d (%d%%)\n", s.InlineBucketN, percentage)
-		percentage = 0
-		if s.LeafInuse != 0 {
-			percentage = int(float32(s.InlineBucketInuse) * 100.0 / float32(s.LeafInuse))
-		}
-		fmt.Fprintf(cmd.Stdout, "\tBytes used for inlined buckets: %d (%d%%)\n", s.InlineBucketInuse, percentage)
-
-		return nil
-	})
-}
-
-// Usage returns the help message.
-func (cmd *StatsCommand) Usage() string {
-	return strings.TrimLeft(`
-usage: bolt stats PATH [PREFIX]
-
-Stats performs an extensive search of the database to track every page
-reference. It starts at the current meta page and recursively iterates
-through every accessible bucket. When PREFIX is given, only buckets whose
-names start with PREFIX are aggregated.
-
-The following errors can be reported:
-
-	already freed
-		The page is referenced more than once in the freelist.
-
-	unreachable unfreed
-		The page is not referenced by a bucket or in the freelist.
-
-	reachable freed
-		The page is referenced by a bucket but is also in the freelist.
-
-	out of bounds
-		A page is referenced that is above the high water mark.
-
-	multiple references
-		A page is referenced by more than one other page.
-
-	invalid type
-		The page type is not "meta", "leaf", "branch", or "freelist".
-
-No errors should occur in your database. However, if for some reason you
-experience corruption, please submit a ticket to the Bolt project page:
-
-  https://github.com/boltdb/bolt/issues
-`, "\n")
-}
-
-var benchBucketName = []byte("bench")
-
-// BenchCommand represents the "bench" command execution.
-type BenchCommand struct {
-	Stdin  io.Reader
-	Stdout io.Writer
-	Stderr io.Writer
-}
-
-// newBenchCommand returns a BenchCommand using the main program's
-// standard input/output streams.
-func newBenchCommand(m *Main) *BenchCommand {
-	return &BenchCommand{
-		Stdin:  m.Stdin,
-		Stdout: m.Stdout,
-		Stderr: m.Stderr,
-	}
-}
-
-// Run executes the "bench" command.
-func (cmd *BenchCommand) Run(args ...string) error {
-	// Parse CLI arguments.
-	options, err := cmd.ParseFlags(args)
-	if err != nil {
-		return err
-	}
-
-	// Remove path if "-work" is not set. Otherwise keep path.
-	if options.Work {
-		fmt.Fprintf(cmd.Stdout, "work: %s\n", options.Path)
-	} else {
-		defer os.Remove(options.Path)
-	}
-
-	// Create database.
-	db, err := bolt.Open(options.Path, 0666, nil)
-	if err != nil {
-		return err
-	}
-	db.NoSync = options.NoSync
-	defer db.Close()
-
-	// Write to the database.
-	var results BenchResults
-	if err := cmd.runWrites(db, options, &results); err != nil {
-		return fmt.Errorf("bench: write: %s", err)
-	}
-
-	// Read from the database.
-	if err := cmd.runReads(db, options, &results); err != nil {
-		return fmt.Errorf("bench: read: %s", err)
-	}
-
-	// Print results.
-	fmt.Fprintf(cmd.Stderr, "# Write\t%v\t(%v/op)\t(%v op/sec)\n", results.WriteDuration, results.WriteOpDuration(), results.WriteOpsPerSecond())
-	fmt.Fprintf(cmd.Stderr, "# Read\t%v\t(%v/op)\t(%v op/sec)\n", results.ReadDuration, results.ReadOpDuration(), results.ReadOpsPerSecond())
-	fmt.Fprintln(cmd.Stderr, "")
-	return nil
-}
-
-// ParseFlags parses the command line flags.
-func (cmd *BenchCommand) ParseFlags(args []string) (*BenchOptions, error) {
-	var options BenchOptions
-
-	// Parse flagset.
-	fs := flag.NewFlagSet("", flag.ContinueOnError)
-	fs.StringVar(&options.ProfileMode, "profile-mode", "rw", "")
-	fs.StringVar(&options.WriteMode, "write-mode", "seq", "")
-	fs.StringVar(&options.ReadMode, "read-mode", "seq", "")
-	fs.IntVar(&options.Iterations, "count", 1000, "")
-	fs.IntVar(&options.BatchSize, "batch-size", 0, "")
-	fs.IntVar(&options.KeySize, "key-size", 8, "")
-	fs.IntVar(&options.ValueSize, "value-size", 32, "")
-	fs.StringVar(&options.CPUProfile, "cpuprofile", "", "")
-	fs.StringVar(&options.MemProfile, "memprofile", "", "")
-	fs.StringVar(&options.BlockProfile, "blockprofile", "", "")
-	fs.Float64Var(&options.FillPercent, "fill-percent", bolt.DefaultFillPercent, "")
-	fs.BoolVar(&options.NoSync, "no-sync", false, "")
-	fs.BoolVar(&options.Work, "work", false, "")
-	fs.StringVar(&options.Path, "path", "", "")
-	fs.SetOutput(cmd.Stderr)
-	if err := fs.Parse(args); err != nil {
-		return nil, err
-	}
-
-	// Set batch size to iteration size if not set.
-	// Require that the iteration count be evenly divisible by the batch size.
-	if options.BatchSize == 0 {
-		options.BatchSize = options.Iterations
-	} else if options.Iterations%options.BatchSize != 0 {
-		return nil, ErrNonDivisibleBatchSize
-	}
-
-	// Generate temp path if one is not passed in.
-	if options.Path == "" {
-		f, err := ioutil.TempFile("", "bolt-bench-")
-		if err != nil {
-			return nil, fmt.Errorf("temp file: %s", err)
-		}
-		f.Close()
-		os.Remove(f.Name())
-		options.Path = f.Name()
-	}
-
-	return &options, nil
-}
-
-// runWrites writes to the database.
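ParseFlags above insists that -count be evenly divisible by -batch-size: the write loops below step by BatchSize and commit one transaction per batch, so a non-divisible combination would no longer write exactly -count keys. The arithmetic in isolation (values are illustrative):

```go
package main

import "fmt"

func main() {
	iterations, batchSize := 1000, 250 // e.g. -count 1000 -batch-size 250
	batches := 0
	for i := 0; i < iterations; i += batchSize {
		batches++ // one db.Update per batch in the real code
	}
	fmt.Println(batches) // 4 transactions of 250 puts each
}
```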
-func (cmd *BenchCommand) runWrites(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
-	// Start profiling for writes.
-	if options.ProfileMode == "rw" || options.ProfileMode == "w" {
-		cmd.startProfiling(options)
-	}
-
-	t := time.Now()
-
-	var err error
-	switch options.WriteMode {
-	case "seq":
-		err = cmd.runWritesSequential(db, options, results)
-	case "rnd":
-		err = cmd.runWritesRandom(db, options, results)
-	case "seq-nest":
-		err = cmd.runWritesSequentialNested(db, options, results)
-	case "rnd-nest":
-		err = cmd.runWritesRandomNested(db, options, results)
-	default:
-		return fmt.Errorf("invalid write mode: %s", options.WriteMode)
-	}
-
-	// Save time to write.
-	results.WriteDuration = time.Since(t)
-
-	// Stop profiling for writes only.
-	if options.ProfileMode == "w" {
-		cmd.stopProfiling()
-	}
-
-	return err
-}
-
-func (cmd *BenchCommand) runWritesSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
-	var i = uint32(0)
-	return cmd.runWritesWithSource(db, options, results, func() uint32 { i++; return i })
-}
-
-func (cmd *BenchCommand) runWritesRandom(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
-	r := rand.New(rand.NewSource(time.Now().UnixNano()))
-	return cmd.runWritesWithSource(db, options, results, func() uint32 { return r.Uint32() })
-}
-
-func (cmd *BenchCommand) runWritesSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
-	var i = uint32(0)
-	return cmd.runWritesNestedWithSource(db, options, results, func() uint32 { i++; return i })
-}
-
-func (cmd *BenchCommand) runWritesRandomNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
-	r := rand.New(rand.NewSource(time.Now().UnixNano()))
-	return cmd.runWritesNestedWithSource(db, options, results, func() uint32 { return r.Uint32() })
-}
-
-func (cmd *BenchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error {
-	results.WriteOps = options.Iterations
-
-	for i := 0; i < options.Iterations; i += options.BatchSize {
-		if err := db.Update(func(tx *bolt.Tx) error {
-			b, _ := tx.CreateBucketIfNotExists(benchBucketName)
-			b.FillPercent = options.FillPercent
-
-			for j := 0; j < options.BatchSize; j++ {
-				key := make([]byte, options.KeySize)
-				value := make([]byte, options.ValueSize)
-
-				// Write key as uint32.
-				binary.BigEndian.PutUint32(key, keySource())
-
-				// Insert key/value.
-				if err := b.Put(key, value); err != nil {
-					return err
-				}
-			}
-
-			return nil
-		}); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (cmd *BenchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error {
-	results.WriteOps = options.Iterations
-
-	for i := 0; i < options.Iterations; i += options.BatchSize {
-		if err := db.Update(func(tx *bolt.Tx) error {
-			top, err := tx.CreateBucketIfNotExists(benchBucketName)
-			if err != nil {
-				return err
-			}
-			top.FillPercent = options.FillPercent
-
-			// Create bucket key.
-			name := make([]byte, options.KeySize)
-			binary.BigEndian.PutUint32(name, keySource())
-
-			// Create bucket.
-			b, err := top.CreateBucketIfNotExists(name)
-			if err != nil {
-				return err
-			}
-			b.FillPercent = options.FillPercent
-
-			for j := 0; j < options.BatchSize; j++ {
-				var key = make([]byte, options.KeySize)
-				var value = make([]byte, options.ValueSize)
-
-				// Generate key as uint32.
-				binary.BigEndian.PutUint32(key, keySource())
-
-				// Insert value into subbucket.
-				if err := b.Put(key, value); err != nil {
-					return err
-				}
-			}
-
-			return nil
-		}); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// runReads reads from the database.
-func (cmd *BenchCommand) runReads(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
-	// Start profiling for reads.
-	if options.ProfileMode == "r" {
-		cmd.startProfiling(options)
-	}
-
-	t := time.Now()
-
-	var err error
-	switch options.ReadMode {
-	case "seq":
-		switch options.WriteMode {
-		case "seq-nest", "rnd-nest":
-			err = cmd.runReadsSequentialNested(db, options, results)
-		default:
-			err = cmd.runReadsSequential(db, options, results)
-		}
-	default:
-		return fmt.Errorf("invalid read mode: %s", options.ReadMode)
-	}
-
-	// Save read time.
-	results.ReadDuration = time.Since(t)
-
-	// Stop profiling for reads.
-	if options.ProfileMode == "rw" || options.ProfileMode == "r" {
-		cmd.stopProfiling()
-	}
-
-	return err
-}
-
-func (cmd *BenchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
-	return db.View(func(tx *bolt.Tx) error {
-		t := time.Now()
-
-		for {
-			var count int
-
-			c := tx.Bucket(benchBucketName).Cursor()
-			for k, v := c.First(); k != nil; k, v = c.Next() {
-				if v == nil {
-					return ErrInvalidValue
-				}
-				count++
-			}
-
-			if options.WriteMode == "seq" && count != options.Iterations {
-				return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, count)
-			}
-
-			results.ReadOps += count
-
-			// Make sure we do this for at least a second.
-			if time.Since(t) >= time.Second {
-				break
-			}
-		}
-
-		return nil
-	})
-}
-
-func (cmd *BenchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
-	return db.View(func(tx *bolt.Tx) error {
-		t := time.Now()
-
-		for {
-			var count int
-			var top = tx.Bucket(benchBucketName)
-			if err := top.ForEach(func(name, _ []byte) error {
-				c := top.Bucket(name).Cursor()
-				for k, v := c.First(); k != nil; k, v = c.Next() {
-					if v == nil {
-						return ErrInvalidValue
-					}
-					count++
-				}
-				return nil
-			}); err != nil {
-				return err
-			}
-
-			if options.WriteMode == "seq-nest" && count != options.Iterations {
-				return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.Iterations, count)
-			}
-
-			results.ReadOps += count
-
-			// Make sure we do this for at least a second.
-			if time.Since(t) >= time.Second {
-				break
-			}
-		}
-
-		return nil
-	})
-}
-
-// File handlers for the various profiles.
-var cpuprofile, memprofile, blockprofile *os.File
-
-// startProfiling starts all profiles set on the options.
-func (cmd *BenchCommand) startProfiling(options *BenchOptions) {
-	var err error
-
-	// Start CPU profiling.
-	if options.CPUProfile != "" {
-		cpuprofile, err = os.Create(options.CPUProfile)
-		if err != nil {
-			fmt.Fprintf(cmd.Stderr, "bench: could not create cpu profile %q: %v\n", options.CPUProfile, err)
-			os.Exit(1)
-		}
-		pprof.StartCPUProfile(cpuprofile)
-	}
-
-	// Start memory profiling.
-	if options.MemProfile != "" {
-		memprofile, err = os.Create(options.MemProfile)
-		if err != nil {
-			fmt.Fprintf(cmd.Stderr, "bench: could not create memory profile %q: %v\n", options.MemProfile, err)
-			os.Exit(1)
-		}
-		runtime.MemProfileRate = 4096
-	}
-
-	// Start block profiling.
-	if options.BlockProfile != "" {
-		blockprofile, err = os.Create(options.BlockProfile)
-		if err != nil {
-			fmt.Fprintf(cmd.Stderr, "bench: could not create block profile %q: %v\n", options.BlockProfile, err)
-			os.Exit(1)
-		}
-		runtime.SetBlockProfileRate(1)
-	}
-}
-
-// stopProfiling stops all profiles.
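startProfiling above and stopProfiling below bracket the benchmark with Go's runtime profilers. The CPU half of that pairing, reduced to its essentials (the file name and workload are illustrative; the pprof calls are stdlib):

```go
package main

import (
	"log"
	"os"
	"runtime/pprof"
)

func main() {
	f, err := os.Create("cpu.prof") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Start sampling; every profile start must be paired with a stop.
	if err := pprof.StartCPUProfile(f); err != nil {
		log.Fatal(err)
	}
	defer pprof.StopCPUProfile()

	busyWork() // workload under measurement
}

func busyWork() {
	x := 0
	for i := 0; i < 1e8; i++ {
		x += i
	}
	_ = x
}
```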
-func (cmd *BenchCommand) stopProfiling() {
-	if cpuprofile != nil {
-		pprof.StopCPUProfile()
-		cpuprofile.Close()
-		cpuprofile = nil
-	}
-
-	if memprofile != nil {
-		pprof.Lookup("heap").WriteTo(memprofile, 0)
-		memprofile.Close()
-		memprofile = nil
-	}
-
-	if blockprofile != nil {
-		pprof.Lookup("block").WriteTo(blockprofile, 0)
-		blockprofile.Close()
-		blockprofile = nil
-		runtime.SetBlockProfileRate(0)
-	}
-}
-
-// BenchOptions represents the set of options that can be passed to "bolt bench".
-type BenchOptions struct {
-	ProfileMode   string
-	WriteMode     string
-	ReadMode      string
-	Iterations    int
-	BatchSize     int
-	KeySize       int
-	ValueSize     int
-	CPUProfile    string
-	MemProfile    string
-	BlockProfile  string
-	StatsInterval time.Duration
-	FillPercent   float64
-	NoSync        bool
-	Work          bool
-	Path          string
-}
-
-// BenchResults represents the performance results of the benchmark.
-type BenchResults struct {
-	WriteOps      int
-	WriteDuration time.Duration
-	ReadOps       int
-	ReadDuration  time.Duration
-}
-
-// WriteOpDuration returns the duration for a single write operation.
-func (r *BenchResults) WriteOpDuration() time.Duration {
-	if r.WriteOps == 0 {
-		return 0
-	}
-	return r.WriteDuration / time.Duration(r.WriteOps)
-}
-
-// WriteOpsPerSecond returns the average number of write operations performed per second.
-func (r *BenchResults) WriteOpsPerSecond() int {
-	var op = r.WriteOpDuration()
-	if op == 0 {
-		return 0
-	}
-	return int(time.Second) / int(op)
-}
-
-// ReadOpDuration returns the duration for a single read operation.
-func (r *BenchResults) ReadOpDuration() time.Duration {
-	if r.ReadOps == 0 {
-		return 0
-	}
-	return r.ReadDuration / time.Duration(r.ReadOps)
-}
-
-// ReadOpsPerSecond returns the average number of read operations performed per second.
-func (r *BenchResults) ReadOpsPerSecond() int {
-	var op = r.ReadOpDuration()
-	if op == 0 {
-		return 0
-	}
-	return int(time.Second) / int(op)
-}
-
-type PageError struct {
-	ID  int
-	Err error
-}
-
-func (e *PageError) Error() string {
-	return fmt.Sprintf("page error: id=%d, err=%s", e.ID, e.Err)
-}
-
-// isPrintable returns true if the string is valid unicode and contains only printable runes.
-func isPrintable(s string) bool {
-	if !utf8.ValidString(s) {
-		return false
-	}
-	for _, ch := range s {
-		if !unicode.IsPrint(ch) {
-			return false
-		}
-	}
-	return true
-}
-
-// ReadPage reads page info & full page data from a path.
-// This is not transactionally safe.
-func ReadPage(path string, pageID int) (*page, []byte, error) {
-	// Find page size.
-	pageSize, err := ReadPageSize(path)
-	if err != nil {
-		return nil, nil, fmt.Errorf("read page size: %s", err)
-	}
-
-	// Open database file.
-	f, err := os.Open(path)
-	if err != nil {
-		return nil, nil, err
-	}
-	defer f.Close()
-
-	// Read one block into buffer.
-	buf := make([]byte, pageSize)
-	if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil {
-		return nil, nil, err
-	} else if n != len(buf) {
-		return nil, nil, io.ErrUnexpectedEOF
-	}
-
-	// Determine total number of blocks.
-	p := (*page)(unsafe.Pointer(&buf[0]))
-	overflowN := p.overflow
-
-	// Re-read entire page (with overflow) into buffer.
-	buf = make([]byte, (int(overflowN)+1)*pageSize)
-	if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil {
-		return nil, nil, err
-	} else if n != len(buf) {
-		return nil, nil, io.ErrUnexpectedEOF
-	}
-	p = (*page)(unsafe.Pointer(&buf[0]))
-
-	return p, buf, nil
-}
-
-// ReadPageSize reads the page size from a path.
-// This is not transactionally safe.
-func ReadPageSize(path string) (int, error) {
-	// Open database file.
- f, err := os.Open(path) - if err != nil { - return 0, err - } - defer f.Close() - - // Read 4KB chunk. - buf := make([]byte, 4096) - if _, err := io.ReadFull(f, buf); err != nil { - return 0, err - } - - // Read page size from metadata. - m := (*meta)(unsafe.Pointer(&buf[PageHeaderSize])) - return int(m.pageSize), nil -} - -// atois parses a slice of strings into integers. -func atois(strs []string) ([]int, error) { - var a []int - for _, str := range strs { - i, err := strconv.Atoi(str) - if err != nil { - return nil, err - } - a = append(a, i) - } - return a, nil -} - -// DO NOT EDIT. Copied from the "bolt" package. -const maxAllocSize = 0xFFFFFFF - -// DO NOT EDIT. Copied from the "bolt" package. -const ( - branchPageFlag = 0x01 - leafPageFlag = 0x02 - metaPageFlag = 0x04 - freelistPageFlag = 0x10 -) - -// DO NOT EDIT. Copied from the "bolt" package. -const bucketLeafFlag = 0x01 - -// DO NOT EDIT. Copied from the "bolt" package. -type pgid uint64 - -// DO NOT EDIT. Copied from the "bolt" package. -type txid uint64 - -// DO NOT EDIT. Copied from the "bolt" package. -type meta struct { - magic uint32 - version uint32 - pageSize uint32 - flags uint32 - root bucket - freelist pgid - pgid pgid - txid txid - checksum uint64 -} - -// DO NOT EDIT. Copied from the "bolt" package. -type bucket struct { - root pgid - sequence uint64 -} - -// DO NOT EDIT. Copied from the "bolt" package. -type page struct { - id pgid - flags uint16 - count uint16 - overflow uint32 - ptr uintptr -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (p *page) Type() string { - if (p.flags & branchPageFlag) != 0 { - return "branch" - } else if (p.flags & leafPageFlag) != 0 { - return "leaf" - } else if (p.flags & metaPageFlag) != 0 { - return "meta" - } else if (p.flags & freelistPageFlag) != 0 { - return "freelist" - } - return fmt.Sprintf("unknown<%02x>", p.flags) -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (p *page) leafPageElement(index uint16) *leafPageElement { - n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] - return n -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (p *page) branchPageElement(index uint16) *branchPageElement { - return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] -} - -// DO NOT EDIT. Copied from the "bolt" package. -type branchPageElement struct { - pos uint32 - ksize uint32 - pgid pgid -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (n *branchPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return buf[n.pos : n.pos+n.ksize] -} - -// DO NOT EDIT. Copied from the "bolt" package. -type leafPageElement struct { - flags uint32 - pos uint32 - ksize uint32 - vsize uint32 -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (n *leafPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return buf[n.pos : n.pos+n.ksize] -} - -// DO NOT EDIT. Copied from the "bolt" package. 
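The value() accessor that follows (like key() just above it) never copies: pos/ksize/vsize describe where the bytes sit relative to the element, and the accessors just slice. The same offset arithmetic with plain slices (a toy layout for illustration, not bolt's real encoding):

```go
package main

import "fmt"

// elem is a toy stand-in for leafPageElement; here offsets are
// relative to the start of buf rather than to the element header.
type elem struct {
	pos, ksize, vsize uint32
}

func main() {
	buf := []byte("??foobar!!") // 2 junk bytes, then "foo", "bar", junk
	e := elem{pos: 2, ksize: 3, vsize: 3}

	key := buf[e.pos : e.pos+e.ksize]                 // "foo"
	val := buf[e.pos+e.ksize : e.pos+e.ksize+e.vsize] // "bar"
	fmt.Printf("%s=%s\n", key, val)                   // foo=bar
}
```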
-func (n *leafPageElement) value() []byte {
-	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
-	return buf[n.pos+n.ksize : n.pos+n.ksize+n.vsize]
-}
diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main_test.go
deleted file mode 100644
index b9e8c671f..000000000
--- a/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main_test.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package main_test
-
-import (
-	"bytes"
-	"io/ioutil"
-	"os"
-	"strconv"
-	"testing"
-
-	"github.com/boltdb/bolt"
-	"github.com/boltdb/bolt/cmd/bolt"
-)
-
-// Ensure the "info" command can print information about a database.
-func TestInfoCommand_Run(t *testing.T) {
-	db := MustOpen(0666, nil)
-	db.DB.Close()
-	defer db.Close()
-
-	// Run the info command.
-	m := NewMain()
-	if err := m.Run("info", db.Path); err != nil {
-		t.Fatal(err)
-	}
-}
-
-// Ensure the "stats" command can execute correctly.
-func TestStatsCommand_Run(t *testing.T) {
-	// The expected output below depends on a 4KB page size.
-	if os.Getpagesize() != 4096 {
-		t.Skip("system does not use 4KB page size")
-	}
-
-	db := MustOpen(0666, nil)
-	defer db.Close()
-
-	if err := db.Update(func(tx *bolt.Tx) error {
-		// Create "foo" bucket.
-		b, err := tx.CreateBucket([]byte("foo"))
-		if err != nil {
-			return err
-		}
-		for i := 0; i < 10; i++ {
-			if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil {
-				return err
-			}
-		}
-
-		// Create "bar" bucket.
-		b, err = tx.CreateBucket([]byte("bar"))
-		if err != nil {
-			return err
-		}
-		for i := 0; i < 100; i++ {
-			if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil {
-				return err
-			}
-		}
-
-		// Create "baz" bucket.
-		b, err = tx.CreateBucket([]byte("baz"))
-		if err != nil {
-			return err
-		}
-		if err := b.Put([]byte("key"), []byte("value")); err != nil {
-			return err
-		}
-
-		return nil
-	}); err != nil {
-		t.Fatal(err)
-	}
-	db.DB.Close()
-
-	// Generate expected result.
-	exp := "Aggregate statistics for 3 buckets\n\n" +
-		"Page count statistics\n" +
-		"\tNumber of logical branch pages: 0\n" +
-		"\tNumber of physical branch overflow pages: 0\n" +
-		"\tNumber of logical leaf pages: 1\n" +
-		"\tNumber of physical leaf overflow pages: 0\n" +
-		"Tree statistics\n" +
-		"\tNumber of key/value pairs: 111\n" +
-		"\tNumber of levels in B+tree: 1\n" +
-		"Page size utilization\n" +
-		"\tBytes allocated for physical branch pages: 0\n" +
-		"\tBytes actually used for branch data: 0 (0%)\n" +
-		"\tBytes allocated for physical leaf pages: 4096\n" +
-		"\tBytes actually used for leaf data: 1996 (48%)\n" +
-		"Bucket statistics\n" +
-		"\tTotal number of buckets: 3\n" +
-		"\tTotal number of inlined buckets: 2 (66%)\n" +
-		"\tBytes used for inlined buckets: 236 (11%)\n"
-
-	// Run the command.
-	m := NewMain()
-	if err := m.Run("stats", db.Path); err != nil {
-		t.Fatal(err)
-	} else if m.Stdout.String() != exp {
-		t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String())
-	}
-}
-
-// Main represents a test wrapper for main.Main that records output.
-type Main struct {
-	*main.Main
-	Stdin  bytes.Buffer
-	Stdout bytes.Buffer
-	Stderr bytes.Buffer
-}
-
-// NewMain returns a new instance of Main.
-func NewMain() *Main {
-	m := &Main{Main: main.NewMain()}
-	m.Main.Stdin = &m.Stdin
-	m.Main.Stdout = &m.Stdout
-	m.Main.Stderr = &m.Stderr
-	return m
-}
-
-// MustOpen creates a Bolt database in a temporary location.
-func MustOpen(mode os.FileMode, options *bolt.Options) *DB {
-	// Create temporary path.
- f, _ := ioutil.TempFile("", "bolt-") - f.Close() - os.Remove(f.Name()) - - db, err := bolt.Open(f.Name(), mode, options) - if err != nil { - panic(err.Error()) - } - return &DB{DB: db, Path: f.Name()} -} - -// DB is a test wrapper for bolt.DB. -type DB struct { - *bolt.DB - Path string -} - -// Close closes and removes the database. -func (db *DB) Close() error { - defer os.Remove(db.Path) - return db.DB.Close() -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go b/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go deleted file mode 100644 index 006c54889..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go +++ /dev/null @@ -1,384 +0,0 @@ -package bolt - -import ( - "bytes" - "fmt" - "sort" -) - -// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. -// Cursors see nested buckets with value == nil. -// Cursors can be obtained from a transaction and are valid as long as the transaction is open. -// -// Keys and values returned from the cursor are only valid for the life of the transaction. -// -// Changing data while traversing with a cursor may cause it to be invalidated -// and return unexpected keys and/or values. You must reposition your cursor -// after mutating data. -type Cursor struct { - bucket *Bucket - stack []elemRef -} - -// Bucket returns the bucket that this cursor was created from. -func (c *Cursor) Bucket() *Bucket { - return c.bucket -} - -// First moves the cursor to the first item in the bucket and returns its key and value. -// If the bucket is empty then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) First() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) - c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) - c.first() - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v - -} - -// Last moves the cursor to the last item in the bucket and returns its key and value. -// If the bucket is empty then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Last() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) - ref := elemRef{page: p, node: n} - ref.index = ref.count() - 1 - c.stack = append(c.stack, ref) - c.last() - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Next moves the cursor to the next item in the bucket and returns its key and value. -// If the cursor is at the end of the bucket then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Next() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - k, v, flags := c.next() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Prev moves the cursor to the previous item in the bucket and returns its key and value. -// If the cursor is at the beginning of the bucket then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. 
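Prev, defined next, is the mirror image of Next; together with First and Last they give forward and reverse iteration. Typical application-side usage looks like this (a sketch against the public API; the file path and bucket contents are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("/tmp/cursor-demo.db", 0666, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	db.Update(func(tx *bolt.Tx) error {
		b, _ := tx.CreateBucketIfNotExists([]byte("animals"))
		b.Put([]byte("cat"), []byte("1"))
		b.Put([]byte("dog"), []byte("2"))
		return nil
	})

	db.View(func(tx *bolt.Tx) error {
		c := tx.Bucket([]byte("animals")).Cursor()
		// Walk backwards in key order: dog, then cat.
		for k, v := c.Last(); k != nil; k, v = c.Prev() {
			fmt.Printf("%s=%s\n", k, v)
		}
		return nil
	})
}
```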
-func (c *Cursor) Prev() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - - // Attempt to move back one element until we're successful. - // Move up the stack as we hit the beginning of each page in our stack. - for i := len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index > 0 { - elem.index-- - break - } - c.stack = c.stack[:i] - } - - // If we've hit the end then return nil. - if len(c.stack) == 0 { - return nil, nil - } - - // Move down the stack to find the last element of the last leaf under this branch. - c.last() - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Seek moves the cursor to a given key and returns it. -// If the key does not exist then the next key is used. If no keys -// follow, a nil key is returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { - k, v, flags := c.seek(seek) - - // If we ended up after the last element of a page then move to the next one. - if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { - k, v, flags = c.next() - } - - if k == nil { - return nil, nil - } else if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Delete removes the current key/value under the cursor from the bucket. -// Delete fails if current key/value is a bucket or if the transaction is not writable. -func (c *Cursor) Delete() error { - if c.bucket.tx.db == nil { - return ErrTxClosed - } else if !c.bucket.Writable() { - return ErrTxNotWritable - } - - key, _, flags := c.keyValue() - // Return an error if current value is a bucket. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - c.node().del(key) - - return nil -} - -// seek moves the cursor to a given key and returns it. -// If the key does not exist then the next key is used. -func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { - _assert(c.bucket.tx.db != nil, "tx closed") - - // Start from root page/node and traverse to correct page. - c.stack = c.stack[:0] - c.search(seek, c.bucket.root) - ref := &c.stack[len(c.stack)-1] - - // If the cursor is pointing to the end of page/node then return nil. - if ref.index >= ref.count() { - return nil, nil, 0 - } - - // If this is a bucket then return a nil value. - return c.keyValue() -} - -// first moves the cursor to the first leaf element under the last page in the stack. -func (c *Cursor) first() { - for { - // Exit when we hit a leaf page. - var ref = &c.stack[len(c.stack)-1] - if ref.isLeaf() { - break - } - - // Keep adding pages pointing to the first element to the stack. - var pgid pgid - if ref.node != nil { - pgid = ref.node.inodes[ref.index].pgid - } else { - pgid = ref.page.branchPageElement(uint16(ref.index)).pgid - } - p, n := c.bucket.pageNode(pgid) - c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) - } -} - -// last moves the cursor to the last leaf element under the last page in the stack. -func (c *Cursor) last() { - for { - // Exit when we hit a leaf page. - ref := &c.stack[len(c.stack)-1] - if ref.isLeaf() { - break - } - - // Keep adding pages pointing to the last element in the stack. 
- var pgid pgid - if ref.node != nil { - pgid = ref.node.inodes[ref.index].pgid - } else { - pgid = ref.page.branchPageElement(uint16(ref.index)).pgid - } - p, n := c.bucket.pageNode(pgid) - - var nextRef = elemRef{page: p, node: n} - nextRef.index = nextRef.count() - 1 - c.stack = append(c.stack, nextRef) - } -} - -// next moves to the next leaf element and returns the key and value. -// If the cursor is at the last leaf element then it stays there and returns nil. -func (c *Cursor) next() (key []byte, value []byte, flags uint32) { - // Attempt to move over one element until we're successful. - // Move up the stack as we hit the end of each page in our stack. - var i int - for i = len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index < elem.count()-1 { - elem.index++ - break - } - } - - // If we've hit the root page then stop and return. This will leave the - // cursor on the last element of the last page. - if i == -1 { - return nil, nil, 0 - } - - // Otherwise start from where we left off in the stack and find the - // first element of the first leaf page. - c.stack = c.stack[:i+1] - c.first() - return c.keyValue() -} - -// search recursively performs a binary search against a given page/node until it finds a given key. -func (c *Cursor) search(key []byte, pgid pgid) { - p, n := c.bucket.pageNode(pgid) - if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { - panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) - } - e := elemRef{page: p, node: n} - c.stack = append(c.stack, e) - - // If we're on a leaf page/node then find the specific node. - if e.isLeaf() { - c.nsearch(key) - return - } - - if n != nil { - c.searchNode(key, n) - return - } - c.searchPage(key, p) -} - -func (c *Cursor) searchNode(key []byte, n *node) { - var exact bool - index := sort.Search(len(n.inodes), func(i int) bool { - // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. - // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(n.inodes[i].key, key) - if ret == 0 { - exact = true - } - return ret != -1 - }) - if !exact && index > 0 { - index-- - } - c.stack[len(c.stack)-1].index = index - - // Recursively search to the next page. - c.search(key, n.inodes[index].pgid) -} - -func (c *Cursor) searchPage(key []byte, p *page) { - // Binary search for the correct range. - inodes := p.branchPageElements() - - var exact bool - index := sort.Search(int(p.count), func(i int) bool { - // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. - // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(inodes[i].key(), key) - if ret == 0 { - exact = true - } - return ret != -1 - }) - if !exact && index > 0 { - index-- - } - c.stack[len(c.stack)-1].index = index - - // Recursively search to the next page. - c.search(key, inodes[index].pgid) -} - -// nsearch searches the leaf node on the top of the stack for a key. -func (c *Cursor) nsearch(key []byte) { - e := &c.stack[len(c.stack)-1] - p, n := e.page, e.node - - // If we have a node then search its inodes. - if n != nil { - index := sort.Search(len(n.inodes), func(i int) bool { - return bytes.Compare(n.inodes[i].key, key) != -1 - }) - e.index = index - return - } - - // If we have a page then search its leaf elements. 
- inodes := p.leafPageElements() - index := sort.Search(int(p.count), func(i int) bool { - return bytes.Compare(inodes[i].key(), key) != -1 - }) - e.index = index -} - -// keyValue returns the key and value of the current leaf element. -func (c *Cursor) keyValue() ([]byte, []byte, uint32) { - ref := &c.stack[len(c.stack)-1] - if ref.count() == 0 || ref.index >= ref.count() { - return nil, nil, 0 - } - - // Retrieve value from node. - if ref.node != nil { - inode := &ref.node.inodes[ref.index] - return inode.key, inode.value, inode.flags - } - - // Or retrieve value from page. - elem := ref.page.leafPageElement(uint16(ref.index)) - return elem.key(), elem.value(), elem.flags -} - -// node returns the node that the cursor is currently positioned on. -func (c *Cursor) node() *node { - _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") - - // If the top of the stack is a leaf node then just return it. - if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { - return ref.node - } - - // Start from root and traverse down the hierarchy. - var n = c.stack[0].node - if n == nil { - n = c.bucket.node(c.stack[0].page.id, nil) - } - for _, ref := range c.stack[:len(c.stack)-1] { - _assert(!n.isLeaf, "expected branch node") - n = n.childAt(int(ref.index)) - } - _assert(n.isLeaf, "expected leaf node") - return n -} - -// elemRef represents a reference to an element on a given page/node. -type elemRef struct { - page *page - node *node - index int -} - -// isLeaf returns whether the ref is pointing at a leaf page/node. -func (r *elemRef) isLeaf() bool { - if r.node != nil { - return r.node.isLeaf - } - return (r.page.flags & leafPageFlag) != 0 -} - -// count returns the number of inodes or page elements. -func (r *elemRef) count() int { - if r.node != nil { - return len(r.node.inodes) - } - return int(r.page.count) -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/cursor_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/cursor_test.go deleted file mode 100644 index b12e1f915..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/cursor_test.go +++ /dev/null @@ -1,511 +0,0 @@ -package bolt_test - -import ( - "bytes" - "encoding/binary" - "fmt" - "os" - "sort" - "testing" - "testing/quick" - - "github.com/boltdb/bolt" -) - -// Ensure that a cursor can return a reference to the bucket that created it. -func TestCursor_Bucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucket([]byte("widgets")) - c := b.Cursor() - equals(t, b, c.Bucket()) - return nil - }) -} - -// Ensure that a Tx cursor can seek to the appropriate keys. -func TestCursor_Seek(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - ok(t, b.Put([]byte("foo"), []byte("0001"))) - ok(t, b.Put([]byte("bar"), []byte("0002"))) - ok(t, b.Put([]byte("baz"), []byte("0003"))) - _, err = b.CreateBucket([]byte("bkt")) - ok(t, err) - return nil - }) - db.View(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - - // Exact match should go to the key. - k, v := c.Seek([]byte("bar")) - equals(t, []byte("bar"), k) - equals(t, []byte("0002"), v) - - // Inexact match should go to the next key. - k, v = c.Seek([]byte("bas")) - equals(t, []byte("baz"), k) - equals(t, []byte("0003"), v) - - // Low key should go to the first key. 
-		k, v = c.Seek([]byte(""))
-		equals(t, []byte("bar"), k)
-		equals(t, []byte("0002"), v)
-
-		// High key should return no key.
-		k, v = c.Seek([]byte("zzz"))
-		assert(t, k == nil, "")
-		assert(t, v == nil, "")
-
-		// Buckets should return their key but no value.
-		k, v = c.Seek([]byte("bkt"))
-		equals(t, []byte("bkt"), k)
-		assert(t, v == nil, "")
-
-		return nil
-	})
-}
-
-func TestCursor_Delete(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-
-	var count = 1000
-
-	// Insert every key between 0 and $count, plus a sub-bucket.
-	db.Update(func(tx *bolt.Tx) error {
-		b, _ := tx.CreateBucket([]byte("widgets"))
-		for i := 0; i < count; i += 1 {
-			k := make([]byte, 8)
-			binary.BigEndian.PutUint64(k, uint64(i))
-			b.Put(k, make([]byte, 100))
-		}
-		b.CreateBucket([]byte("sub"))
-		return nil
-	})
-
-	db.Update(func(tx *bolt.Tx) error {
-		c := tx.Bucket([]byte("widgets")).Cursor()
-		bound := make([]byte, 8)
-		binary.BigEndian.PutUint64(bound, uint64(count/2))
-		for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() {
-			if err := c.Delete(); err != nil {
-				return err
-			}
-		}
-		c.Seek([]byte("sub"))
-		err := c.Delete()
-		equals(t, err, bolt.ErrIncompatibleValue)
-		return nil
-	})
-
-	db.View(func(tx *bolt.Tx) error {
-		b := tx.Bucket([]byte("widgets"))
-		equals(t, b.Stats().KeyN, count/2+1)
-		return nil
-	})
-}
-
-// Ensure that a Tx cursor can seek to the appropriate keys when there are a
-// large number of keys. This test also checks that seek will always move
-// forward to the next key.
-//
-// Related: https://github.com/boltdb/bolt/pull/187
-func TestCursor_Seek_Large(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-
-	var count = 10000
-
-	// Insert every other key between 0 and $count.
-	db.Update(func(tx *bolt.Tx) error {
-		b, _ := tx.CreateBucket([]byte("widgets"))
-		for i := 0; i < count; i += 100 {
-			for j := i; j < i+100; j += 2 {
-				k := make([]byte, 8)
-				binary.BigEndian.PutUint64(k, uint64(j))
-				b.Put(k, make([]byte, 100))
-			}
-		}
-		return nil
-	})
-
-	db.View(func(tx *bolt.Tx) error {
-		c := tx.Bucket([]byte("widgets")).Cursor()
-		for i := 0; i < count; i++ {
-			seek := make([]byte, 8)
-			binary.BigEndian.PutUint64(seek, uint64(i))
-
-			k, _ := c.Seek(seek)
-
-			// The last seek is beyond the end of the range so
-			// it should return nil.
-			if i == count-1 {
-				assert(t, k == nil, "")
-				continue
-			}
-
-			// Otherwise we should seek to the exact key or the next key.
-			num := binary.BigEndian.Uint64(k)
-			if i%2 == 0 {
-				equals(t, uint64(i), num)
-			} else {
-				equals(t, uint64(i+1), num)
-			}
-		}
-
-		return nil
-	})
-}
-
-// Ensure that a cursor can iterate over an empty bucket without error.
-func TestCursor_EmptyBucket(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		_, err := tx.CreateBucket([]byte("widgets"))
-		return err
-	})
-	db.View(func(tx *bolt.Tx) error {
-		c := tx.Bucket([]byte("widgets")).Cursor()
-		k, v := c.First()
-		assert(t, k == nil, "")
-		assert(t, v == nil, "")
-		return nil
-	})
-}
-
-// Ensure that a Tx cursor can reverse iterate over an empty bucket without error.
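
// The Seek semantics pinned down above are what make bounded range scans
// possible: an inexact match lands on the next key, and a nil key marks
// the end. A minimal sketch of that pattern, not part of this test suite;
// the "events" bucket and date-stamped keys are illustrative assumptions.
func exampleSeekRangeScan(db *bolt.DB) error {
	return db.View(func(tx *bolt.Tx) error {
		c := tx.Bucket([]byte("events")).Cursor()
		min, max := []byte("2015-01-01"), []byte("2015-02-01")
		// Seek to the first key >= min and stop at the exclusive bound max.
		for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) < 0; k, v = c.Next() {
			fmt.Printf("%s: %s\n", k, v)
		}
		return nil
	})
}
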
-func TestCursor_EmptyBucketReverse(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - db.View(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - k, v := c.Last() - assert(t, k == nil, "") - assert(t, v == nil, "") - return nil - }) -} - -// Ensure that a Tx cursor can iterate over a single root with a couple elements. -func TestCursor_Iterate_Leaf(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{}) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0}) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1}) - return nil - }) - tx, _ := db.Begin(false) - c := tx.Bucket([]byte("widgets")).Cursor() - - k, v := c.First() - equals(t, string(k), "bar") - equals(t, v, []byte{1}) - - k, v = c.Next() - equals(t, string(k), "baz") - equals(t, v, []byte{}) - - k, v = c.Next() - equals(t, string(k), "foo") - equals(t, v, []byte{0}) - - k, v = c.Next() - assert(t, k == nil, "") - assert(t, v == nil, "") - - k, v = c.Next() - assert(t, k == nil, "") - assert(t, v == nil, "") - - tx.Rollback() -} - -// Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements. -func TestCursor_LeafRootReverse(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{}) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0}) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1}) - return nil - }) - tx, _ := db.Begin(false) - c := tx.Bucket([]byte("widgets")).Cursor() - - k, v := c.Last() - equals(t, string(k), "foo") - equals(t, v, []byte{0}) - - k, v = c.Prev() - equals(t, string(k), "baz") - equals(t, v, []byte{}) - - k, v = c.Prev() - equals(t, string(k), "bar") - equals(t, v, []byte{1}) - - k, v = c.Prev() - assert(t, k == nil, "") - assert(t, v == nil, "") - - k, v = c.Prev() - assert(t, k == nil, "") - assert(t, v == nil, "") - - tx.Rollback() -} - -// Ensure that a Tx cursor can restart from the beginning. -func TestCursor_Restart(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{}) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{}) - return nil - }) - - tx, _ := db.Begin(false) - c := tx.Bucket([]byte("widgets")).Cursor() - - k, _ := c.First() - equals(t, string(k), "bar") - - k, _ = c.Next() - equals(t, string(k), "foo") - - k, _ = c.First() - equals(t, string(k), "bar") - - k, _ = c.Next() - equals(t, string(k), "foo") - - tx.Rollback() -} - -// Ensure that a Tx can iterate over all elements in a bucket. -func TestCursor_QuickCheck(t *testing.T) { - f := func(items testdata) bool { - db := NewTestDB() - defer db.Close() - - // Bulk insert all values. - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - ok(t, b.Put(item.Key, item.Value)) - } - ok(t, tx.Commit()) - - // Sort test data. - sort.Sort(items) - - // Iterate over all items and check consistency. 
- var index = 0 - tx, _ = db.Begin(false) - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() { - equals(t, k, items[index].Key) - equals(t, v, items[index].Value) - index++ - } - equals(t, len(items), index) - tx.Rollback() - - return true - } - if err := quick.Check(f, qconfig()); err != nil { - t.Error(err) - } -} - -// Ensure that a transaction can iterate over all elements in a bucket in reverse. -func TestCursor_QuickCheck_Reverse(t *testing.T) { - f := func(items testdata) bool { - db := NewTestDB() - defer db.Close() - - // Bulk insert all values. - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - ok(t, b.Put(item.Key, item.Value)) - } - ok(t, tx.Commit()) - - // Sort test data. - sort.Sort(revtestdata(items)) - - // Iterate over all items and check consistency. - var index = 0 - tx, _ = db.Begin(false) - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() { - equals(t, k, items[index].Key) - equals(t, v, items[index].Value) - index++ - } - equals(t, len(items), index) - tx.Rollback() - - return true - } - if err := quick.Check(f, qconfig()); err != nil { - t.Error(err) - } -} - -// Ensure that a Tx cursor can iterate over subbuckets. -func TestCursor_QuickCheck_BucketsOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - _, err = b.CreateBucket([]byte("foo")) - ok(t, err) - _, err = b.CreateBucket([]byte("bar")) - ok(t, err) - _, err = b.CreateBucket([]byte("baz")) - ok(t, err) - return nil - }) - db.View(func(tx *bolt.Tx) error { - var names []string - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - names = append(names, string(k)) - assert(t, v == nil, "") - } - equals(t, names, []string{"bar", "baz", "foo"}) - return nil - }) -} - -// Ensure that a Tx cursor can reverse iterate over subbuckets. -func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) { - db := NewTestDB() - defer db.Close() - - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - _, err = b.CreateBucket([]byte("foo")) - ok(t, err) - _, err = b.CreateBucket([]byte("bar")) - ok(t, err) - _, err = b.CreateBucket([]byte("baz")) - ok(t, err) - return nil - }) - db.View(func(tx *bolt.Tx) error { - var names []string - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.Last(); k != nil; k, v = c.Prev() { - names = append(names, string(k)) - assert(t, v == nil, "") - } - equals(t, names, []string{"foo", "baz", "bar"}) - return nil - }) -} - -func ExampleCursor() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Start a read-write transaction. - db.Update(func(tx *bolt.Tx) error { - // Create a new bucket. - tx.CreateBucket([]byte("animals")) - - // Insert data into a bucket. - b := tx.Bucket([]byte("animals")) - b.Put([]byte("dog"), []byte("fun")) - b.Put([]byte("cat"), []byte("lame")) - b.Put([]byte("liger"), []byte("awesome")) - - // Create a cursor for iteration. - c := b.Cursor() - - // Iterate over items in sorted key order. This starts from the - // first key/value pair and updates the k/v variables to the - // next key/value on each iteration. 
-		//
-		// The loop finishes at the end of the cursor when a nil key is returned.
-		for k, v := c.First(); k != nil; k, v = c.Next() {
-			fmt.Printf("A %s is %s.\n", k, v)
-		}
-
-		return nil
-	})
-
-	// Output:
-	// A cat is lame.
-	// A dog is fun.
-	// A liger is awesome.
-}
-
-func ExampleCursor_reverse() {
-	// Open the database.
-	db, _ := bolt.Open(tempfile(), 0666, nil)
-	defer os.Remove(db.Path())
-	defer db.Close()
-
-	// Start a read-write transaction.
-	db.Update(func(tx *bolt.Tx) error {
-		// Create a new bucket.
-		tx.CreateBucket([]byte("animals"))
-
-		// Insert data into a bucket.
-		b := tx.Bucket([]byte("animals"))
-		b.Put([]byte("dog"), []byte("fun"))
-		b.Put([]byte("cat"), []byte("lame"))
-		b.Put([]byte("liger"), []byte("awesome"))
-
-		// Create a cursor for iteration.
-		c := b.Cursor()
-
-		// Iterate over items in reverse sorted key order. This starts
-		// from the last key/value pair and updates the k/v variables to
-		// the previous key/value on each iteration.
-		//
-		// The loop finishes at the beginning of the cursor when a nil key
-		// is returned.
-		for k, v := c.Last(); k != nil; k, v = c.Prev() {
-			fmt.Printf("A %s is %s.\n", k, v)
-		}
-
-		return nil
-	})
-
-	// Output:
-	// A liger is awesome.
-	// A dog is fun.
-	// A cat is lame.
-}
diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/db.go b/Godeps/_workspace/src/github.com/boltdb/bolt/db.go
deleted file mode 100644
index d39c4aa9c..000000000
--- a/Godeps/_workspace/src/github.com/boltdb/bolt/db.go
+++ /dev/null
@@ -1,792 +0,0 @@
-package bolt
-
-import (
-	"fmt"
-	"hash/fnv"
-	"os"
-	"runtime"
-	"runtime/debug"
-	"strings"
-	"sync"
-	"time"
-	"unsafe"
-)
-
-// The largest step that can be taken when remapping the mmap.
-const maxMmapStep = 1 << 30 // 1GB
-
-// The data file format version.
-const version = 2
-
-// Represents a marker value to indicate that a file is a Bolt DB.
-const magic uint32 = 0xED0CDAED
-
-// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
-// syncing changes to a file. This is required as some operating systems,
-// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
-// must be synchronized using the msync(2) syscall.
-const IgnoreNoSync = runtime.GOOS == "openbsd"
-
-// Default values if not set in a DB instance.
-const (
-	DefaultMaxBatchSize int = 1000
-	DefaultMaxBatchDelay = 10 * time.Millisecond
-)
-
-// DB represents a collection of buckets persisted to a file on disk.
-// All data access is performed through transactions which can be obtained through the DB.
-// All the functions on DB will return an ErrDatabaseNotOpen if accessed before Open() is called.
-type DB struct {
-	// When enabled, the database will perform a Check() after every commit.
-	// A panic is issued if the database is in an inconsistent state. This
-	// flag has a large performance impact so it should only be used for
-	// debugging purposes.
-	StrictMode bool
-
-	// Setting the NoSync flag will cause the database to skip fsync()
-	// calls after each commit. This can be useful when bulk loading data
-	// into a database and you can restart the bulk load in the event of
-	// a system failure or database corruption. Do not set this flag for
-	// normal use.
-	//
-	// If the package global IgnoreNoSync constant is true, this value is
-	// ignored. See the comment on that constant for more details.
-	//
-	// THIS IS UNSAFE. PLEASE USE WITH CAUTION.
-	NoSync bool
-
-	// When true, skips the truncate call when growing the database.
- // Setting this to true is only safe on non-ext3/ext4 systems. - // Skipping truncation avoids preallocation of hard drive space and - // bypasses a truncate() and fsync() syscall on remapping. - // - // https://github.com/boltdb/bolt/issues/284 - NoGrowSync bool - - // MaxBatchSize is the maximum size of a batch. Default value is - // copied from DefaultMaxBatchSize in Open. - // - // If <=0, disables batching. - // - // Do not change concurrently with calls to Batch. - MaxBatchSize int - - // MaxBatchDelay is the maximum delay before a batch starts. - // Default value is copied from DefaultMaxBatchDelay in Open. - // - // If <=0, effectively disables batching. - // - // Do not change concurrently with calls to Batch. - MaxBatchDelay time.Duration - - path string - file *os.File - dataref []byte // mmap'ed readonly, write throws SEGV - data *[maxMapSize]byte - datasz int - meta0 *meta - meta1 *meta - pageSize int - opened bool - rwtx *Tx - txs []*Tx - freelist *freelist - stats Stats - - batchMu sync.Mutex - batch *batch - - rwlock sync.Mutex // Allows only one writer at a time. - metalock sync.Mutex // Protects meta page access. - mmaplock sync.RWMutex // Protects mmap access during remapping. - statlock sync.RWMutex // Protects stats access. - - ops struct { - writeAt func(b []byte, off int64) (n int, err error) - } - - // Read only mode. - // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. - readOnly bool -} - -// Path returns the path to currently open database file. -func (db *DB) Path() string { - return db.path -} - -// GoString returns the Go string representation of the database. -func (db *DB) GoString() string { - return fmt.Sprintf("bolt.DB{path:%q}", db.path) -} - -// String returns the string representation of the database. -func (db *DB) String() string { - return fmt.Sprintf("DB<%q>", db.path) -} - -// Open creates and opens a database at the given path. -// If the file does not exist then it will be created automatically. -// Passing in nil options will cause Bolt to open the database with the default options. -func Open(path string, mode os.FileMode, options *Options) (*DB, error) { - var db = &DB{opened: true} - - // Set default options if no options are provided. - if options == nil { - options = DefaultOptions - } - db.NoGrowSync = options.NoGrowSync - - // Set default values for later DB operations. - db.MaxBatchSize = DefaultMaxBatchSize - db.MaxBatchDelay = DefaultMaxBatchDelay - - flag := os.O_RDWR - if options.ReadOnly { - flag = os.O_RDONLY - db.readOnly = true - } - - // Open data file and separate sync handler for metadata writes. - db.path = path - var err error - if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil { - _ = db.close() - return nil, err - } - - // Lock file so that other processes using Bolt in read-write mode cannot - // use the database at the same time. This would cause corruption since - // the two processes would write meta pages and free pages separately. - // The database file is locked exclusively (only one process can grab the lock) - // if !options.ReadOnly. - // The database file is locked using the shared lock (more than one process may - // hold a lock at the same time) otherwise (options.ReadOnly is set). - if err := flock(db.file, !db.readOnly, options.Timeout); err != nil { - _ = db.close() - return nil, err - } - - // Default values for test hooks - db.ops.writeAt = db.file.WriteAt - - // Initialize the database if it doesn't exist. 
-	if info, err := db.file.Stat(); err != nil {
-		return nil, fmt.Errorf("stat error: %s", err)
-	} else if info.Size() == 0 {
-		// Initialize new files with meta pages.
-		if err := db.init(); err != nil {
-			return nil, err
-		}
-	} else {
-		// Read the first meta page to determine the page size.
-		var buf [0x1000]byte
-		if _, err := db.file.ReadAt(buf[:], 0); err == nil {
-			m := db.pageInBuffer(buf[:], 0).meta()
-			if err := m.validate(); err != nil {
-				return nil, fmt.Errorf("meta0 error: %s", err)
-			}
-			db.pageSize = int(m.pageSize)
-		}
-	}
-
-	// Memory map the data file.
-	if err := db.mmap(0); err != nil {
-		_ = db.close()
-		return nil, err
-	}
-
-	// Read in the freelist.
-	db.freelist = newFreelist()
-	db.freelist.read(db.page(db.meta().freelist))
-
-	// Mark the database as opened and return.
-	return db, nil
-}
-
-// mmap opens the underlying memory-mapped file and initializes the meta references.
-// minsz is the minimum size that the new mmap can be.
-func (db *DB) mmap(minsz int) error {
-	db.mmaplock.Lock()
-	defer db.mmaplock.Unlock()
-
-	info, err := db.file.Stat()
-	if err != nil {
-		return fmt.Errorf("mmap stat error: %s", err)
-	} else if int(info.Size()) < db.pageSize*2 {
-		return fmt.Errorf("file size too small")
-	}
-
-	// Ensure the size is at least the minimum size.
-	var size = int(info.Size())
-	if size < minsz {
-		size = minsz
-	}
-	size, err = db.mmapSize(size)
-	if err != nil {
-		return err
-	}
-
-	// Dereference all mmap references before unmapping.
-	if db.rwtx != nil {
-		db.rwtx.root.dereference()
-	}
-
-	// Unmap existing data before continuing.
-	if err := db.munmap(); err != nil {
-		return err
-	}
-
-	// Memory-map the data file as a byte slice.
-	if err := mmap(db, size); err != nil {
-		return err
-	}
-
-	// Save references to the meta pages.
-	db.meta0 = db.page(0).meta()
-	db.meta1 = db.page(1).meta()
-
-	// Validate the meta pages.
-	if err := db.meta0.validate(); err != nil {
-		return fmt.Errorf("meta0 error: %s", err)
-	}
-	if err := db.meta1.validate(); err != nil {
-		return fmt.Errorf("meta1 error: %s", err)
-	}
-
-	return nil
-}
-
-// munmap unmaps the data file from memory.
-func (db *DB) munmap() error {
-	if err := munmap(db); err != nil {
-		return fmt.Errorf("unmap error: " + err.Error())
-	}
-	return nil
-}
-
-// mmapSize determines the appropriate size for the mmap given the current size
-// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
-// Returns an error if the new mmap size is greater than the max allowed.
-func (db *DB) mmapSize(size int) (int, error) {
-	// Double the size from 32KB until 1GB.
-	for i := uint(15); i <= 30; i++ {
-		if size <= 1<<i {
-			return 1 << i, nil
-		}
-	}
-
-	// Verify the requested size is not above the maximum allowed.
-	if size > maxMapSize {
-		return 0, fmt.Errorf("mmap too large")
-	}
-
-	// If larger than 1GB then grow by 1GB at a time.
-	sz := int64(size)
-	if remainder := sz % int64(maxMmapStep); remainder > 0 {
-		sz += int64(maxMmapStep) - remainder
-	}
-
-	// Ensure that the mmap size is a multiple of the page size.
-	// This should always be true since we're incrementing in 1GB steps.
-	pageSize := int64(db.pageSize)
-	if (sz % pageSize) != 0 {
-		sz = ((sz / pageSize) + 1) * pageSize
-	}
-
-	// If we've exceeded the max size then only grow up to the max size.
-	if sz > maxMapSize {
-		sz = maxMapSize
-	}
-
-	return int(sz), nil
-}
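
// Two standalone sketches, not part of bolt itself, that may help when
// reasoning about the code above and below. First, the sizing policy that
// mmapSize implements: double from 32KB up to 1GB, then grow in 1GB steps
// rounded up to a page-size multiple (the maxMapSize cap is omitted here
// for brevity).
func nextMmapSize(size, pageSize int) int {
	const step = 1 << 30 // mirrors maxMmapStep (1GB)
	for i := uint(15); i <= 30; i++ {
		if size <= 1<<i {
			return 1 << i
		}
	}
	sz := size
	if r := sz % step; r > 0 {
		sz += step - r // round up to the next 1GB boundary
	}
	if r := sz % pageSize; r != 0 {
		sz = ((sz / pageSize) + 1) * pageSize // keep the size page-aligned
	}
	return sz
}

// Second, the single-goroutine deadlock that the documentation on Begin
// (below) warns about: an open read transaction holds the mmap read lock,
// so a write transaction that has to grow and remap the file blocks
// forever waiting for the mmap write lock. Illustrative only.
func deadlockSketch(db *DB) error {
	rtx, err := db.Begin(false) // read tx pins the current mmap
	if err != nil {
		return err
	}
	defer rtx.Rollback()
	// DON'T DO THIS: if the update below needs to grow the data file, the
	// remap waits on rtx, which this goroutine can then never roll back.
	return db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte("big"))
		return err
	})
}
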
-
-// init creates a new database file and initializes its meta pages.
-func (db *DB) init() error {
-	// Set the page size to the OS page size.
-	db.pageSize = os.Getpagesize()
-
-	// Create two meta pages on a buffer.
-	buf := make([]byte, db.pageSize*4)
-	for i := 0; i < 2; i++ {
-		p := db.pageInBuffer(buf[:], pgid(i))
-		p.id = pgid(i)
-		p.flags = metaPageFlag
-
-		// Initialize the meta page.
-		m := p.meta()
-		m.magic = magic
-		m.version = version
-		m.pageSize = uint32(db.pageSize)
-		m.freelist = 2
-		m.root = bucket{root: 3}
-		m.pgid = 4
-		m.txid = txid(i)
-	}
-
-	// Write an empty freelist at page 3.
-	p := db.pageInBuffer(buf[:], pgid(2))
-	p.id = pgid(2)
-	p.flags = freelistPageFlag
-	p.count = 0
-
-	// Write an empty leaf page at page 4.
-	p = db.pageInBuffer(buf[:], pgid(3))
-	p.id = pgid(3)
-	p.flags = leafPageFlag
-	p.count = 0
-
-	// Write the buffer to our data file.
-	if _, err := db.ops.writeAt(buf, 0); err != nil {
-		return err
-	}
-	if err := fdatasync(db); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// Close releases all database resources.
-// All transactions must be closed before closing the database.
-func (db *DB) Close() error {
-	db.rwlock.Lock()
-	defer db.rwlock.Unlock()
-
-	db.metalock.Lock()
-	defer db.metalock.Unlock()
-
-	db.mmaplock.RLock()
-	defer db.mmaplock.RUnlock()
-
-	return db.close()
-}
-
-func (db *DB) close() error {
-	db.opened = false
-
-	db.freelist = nil
-	db.path = ""
-
-	// Clear ops.
-	db.ops.writeAt = nil
-
-	// Close the mmap.
-	if err := db.munmap(); err != nil {
-		return err
-	}
-
-	// Close file handles.
-	if db.file != nil {
-		// No need to unlock read-only file.
-		if !db.readOnly {
-			// Unlock the file.
-			_ = funlock(db.file)
-		}
-
-		// Close the file descriptor.
-		if err := db.file.Close(); err != nil {
-			return fmt.Errorf("db file close: %s", err)
-		}
-		db.file = nil
-	}
-
-	return nil
-}
-
-// Begin starts a new transaction.
-// Multiple read-only transactions can be used concurrently but only one
-// write transaction can be used at a time. Starting multiple write transactions
-// will cause the calls to block and be serialized until the current write
-// transaction finishes.
-//
-// Transactions should not be dependent on one another. Opening a read
-// transaction and a write transaction in the same goroutine can cause the
-// writer to deadlock because the database periodically needs to re-mmap itself
-// as it grows and it cannot do that while a read transaction is open.
-//
-// IMPORTANT: You must close read-only transactions after you are finished or
-// else the database will not reclaim old pages.
-func (db *DB) Begin(writable bool) (*Tx, error) {
-	if writable {
-		return db.beginRWTx()
-	}
-	return db.beginTx()
-}
-
-func (db *DB) beginTx() (*Tx, error) {
-	// Lock the meta pages while we initialize the transaction. We obtain
-	// the meta lock before the mmap lock because that's the order that the
-	// write transaction will obtain them.
-	db.metalock.Lock()
-
-	// Obtain a read-only lock on the mmap. When the mmap is remapped it will
-	// obtain a write lock so all transactions must finish before it can be
-	// remapped.
-	db.mmaplock.RLock()
-
-	// Exit if the database is not open yet.
-	if !db.opened {
-		db.mmaplock.RUnlock()
-		db.metalock.Unlock()
-		return nil, ErrDatabaseNotOpen
-	}
-
-	// Create a transaction associated with the database.
-	t := &Tx{}
-	t.init(db)
-
-	// Keep track of transaction until it closes.
-	db.txs = append(db.txs, t)
-	n := len(db.txs)
-
-	// Unlock the meta pages.
-	db.metalock.Unlock()
-
-	// Update the transaction stats.
- db.statlock.Lock() - db.stats.TxN++ - db.stats.OpenTxN = n - db.statlock.Unlock() - - return t, nil -} - -func (db *DB) beginRWTx() (*Tx, error) { - // If the database was opened with Options.ReadOnly, return an error. - if db.readOnly { - return nil, ErrDatabaseReadOnly - } - - // Obtain writer lock. This is released by the transaction when it closes. - // This enforces only one writer transaction at a time. - db.rwlock.Lock() - - // Once we have the writer lock then we can lock the meta pages so that - // we can set up the transaction. - db.metalock.Lock() - defer db.metalock.Unlock() - - // Exit if the database is not open yet. - if !db.opened { - db.rwlock.Unlock() - return nil, ErrDatabaseNotOpen - } - - // Create a transaction associated with the database. - t := &Tx{writable: true} - t.init(db) - db.rwtx = t - - // Free any pages associated with closed read-only transactions. - var minid txid = 0xFFFFFFFFFFFFFFFF - for _, t := range db.txs { - if t.meta.txid < minid { - minid = t.meta.txid - } - } - if minid > 0 { - db.freelist.release(minid - 1) - } - - return t, nil -} - -// removeTx removes a transaction from the database. -func (db *DB) removeTx(tx *Tx) { - // Release the read lock on the mmap. - db.mmaplock.RUnlock() - - // Use the meta lock to restrict access to the DB object. - db.metalock.Lock() - - // Remove the transaction. - for i, t := range db.txs { - if t == tx { - db.txs = append(db.txs[:i], db.txs[i+1:]...) - break - } - } - n := len(db.txs) - - // Unlock the meta pages. - db.metalock.Unlock() - - // Merge statistics. - db.statlock.Lock() - db.stats.OpenTxN = n - db.stats.TxStats.add(&tx.stats) - db.statlock.Unlock() -} - -// Update executes a function within the context of a read-write managed transaction. -// If no error is returned from the function then the transaction is committed. -// If an error is returned then the entire transaction is rolled back. -// Any error that is returned from the function or returned from the commit is -// returned from the Update() method. -// -// Attempting to manually commit or rollback within the function will cause a panic. -func (db *DB) Update(fn func(*Tx) error) error { - t, err := db.Begin(true) - if err != nil { - return err - } - - // Make sure the transaction rolls back in the event of a panic. - defer func() { - if t.db != nil { - t.rollback() - } - }() - - // Mark as a managed tx so that the inner function cannot manually commit. - t.managed = true - - // If an error is returned from the function then rollback and return error. - err = fn(t) - t.managed = false - if err != nil { - _ = t.Rollback() - return err - } - - return t.Commit() -} - -// View executes a function within the context of a managed read-only transaction. -// Any error that is returned from the function is returned from the View() method. -// -// Attempting to manually rollback within the function will cause a panic. -func (db *DB) View(fn func(*Tx) error) error { - t, err := db.Begin(false) - if err != nil { - return err - } - - // Make sure the transaction rolls back in the event of a panic. - defer func() { - if t.db != nil { - t.rollback() - } - }() - - // Mark as a managed tx so that the inner function cannot manually rollback. - t.managed = true - - // If an error is returned from the function then pass it through. - err = fn(t) - t.managed = false - if err != nil { - _ = t.Rollback() - return err - } - - if err := t.Rollback(); err != nil { - return err - } - - return nil -} - -// Sync executes fdatasync() against the database file handle. 
-// -// This is not necessary under normal operation, however, if you use NoSync -// then it allows you to force the database file to sync against the disk. -func (db *DB) Sync() error { return fdatasync(db) } - -// Stats retrieves ongoing performance stats for the database. -// This is only updated when a transaction closes. -func (db *DB) Stats() Stats { - db.statlock.RLock() - defer db.statlock.RUnlock() - return db.stats -} - -// This is for internal access to the raw data bytes from the C cursor, use -// carefully, or not at all. -func (db *DB) Info() *Info { - return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} -} - -// page retrieves a page reference from the mmap based on the current page size. -func (db *DB) page(id pgid) *page { - pos := id * pgid(db.pageSize) - return (*page)(unsafe.Pointer(&db.data[pos])) -} - -// pageInBuffer retrieves a page reference from a given byte array based on the current page size. -func (db *DB) pageInBuffer(b []byte, id pgid) *page { - return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) -} - -// meta retrieves the current meta page reference. -func (db *DB) meta() *meta { - if db.meta0.txid > db.meta1.txid { - return db.meta0 - } - return db.meta1 -} - -// allocate returns a contiguous block of memory starting at a given page. -func (db *DB) allocate(count int) (*page, error) { - // Allocate a temporary buffer for the page. - buf := make([]byte, count*db.pageSize) - p := (*page)(unsafe.Pointer(&buf[0])) - p.overflow = uint32(count - 1) - - // Use pages from the freelist if they are available. - if p.id = db.freelist.allocate(count); p.id != 0 { - return p, nil - } - - // Resize mmap() if we're at the end. - p.id = db.rwtx.meta.pgid - var minsz = int((p.id+pgid(count))+1) * db.pageSize - if minsz >= db.datasz { - if err := db.mmap(minsz); err != nil { - return nil, fmt.Errorf("mmap allocate error: %s", err) - } - } - - // Move the page id high water mark. - db.rwtx.meta.pgid += pgid(count) - - return p, nil -} - -func (db *DB) IsReadOnly() bool { - return db.readOnly -} - -// Options represents the options that can be set when opening a database. -type Options struct { - // Timeout is the amount of time to wait to obtain a file lock. - // When set to zero it will wait indefinitely. This option is only - // available on Darwin and Linux. - Timeout time.Duration - - // Sets the DB.NoGrowSync flag before memory mapping the file. - NoGrowSync bool - - // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to - // grab a shared lock (UNIX). - ReadOnly bool -} - -// DefaultOptions represent the options used if nil options are passed into Open(). -// No timeout is used which will cause Bolt to wait indefinitely for a lock. -var DefaultOptions = &Options{ - Timeout: 0, - NoGrowSync: false, -} - -// Stats represents statistics about the database. -type Stats struct { - // Freelist stats - FreePageN int // total number of free pages on the freelist - PendingPageN int // total number of pending pages on the freelist - FreeAlloc int // total bytes allocated in free pages - FreelistInuse int // total bytes used by the freelist - - // Transaction stats - TxN int // total number of started read transactions - OpenTxN int // number of currently open read transactions - - TxStats TxStats // global, ongoing stats. -} - -// Sub calculates and returns the difference between two sets of database stats. 
-// This is useful when obtaining stats at two different points in time and
-// you need the performance counters that occurred within that time span.
-func (s *Stats) Sub(other *Stats) Stats {
-	if other == nil {
-		return *s
-	}
-	var diff Stats
-	diff.FreePageN = s.FreePageN
-	diff.PendingPageN = s.PendingPageN
-	diff.FreeAlloc = s.FreeAlloc
-	diff.FreelistInuse = s.FreelistInuse
-	diff.TxN = s.TxN - other.TxN
-	diff.TxStats = s.TxStats.Sub(&other.TxStats)
-	return diff
-}
-
-func (s *Stats) add(other *Stats) {
-	s.TxStats.add(&other.TxStats)
-}
-
-type Info struct {
-	Data     uintptr
-	PageSize int
-}
-
-type meta struct {
-	magic    uint32
-	version  uint32
-	pageSize uint32
-	flags    uint32
-	root     bucket
-	freelist pgid
-	pgid     pgid
-	txid     txid
-	checksum uint64
-}
-
-// validate checks the marker bytes and version of the meta page to ensure it matches this binary.
-func (m *meta) validate() error {
-	if m.checksum != 0 && m.checksum != m.sum64() {
-		return ErrChecksum
-	} else if m.magic != magic {
-		return ErrInvalid
-	} else if m.version != version {
-		return ErrVersionMismatch
-	}
-	return nil
-}
-
-// copy copies one meta object to another.
-func (m *meta) copy(dest *meta) {
-	*dest = *m
-}
-
-// write writes the meta onto a page.
-func (m *meta) write(p *page) {
-	if m.root.root >= m.pgid {
-		panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
-	} else if m.freelist >= m.pgid {
-		panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
-	}
-
-	// Page id is either going to be 0 or 1 which we can determine by the transaction ID.
-	p.id = pgid(m.txid % 2)
-	p.flags |= metaPageFlag
-
-	// Calculate the checksum.
-	m.checksum = m.sum64()
-
-	m.copy(p.meta())
-}
-
-// generates the checksum for the meta.
-func (m *meta) sum64() uint64 {
-	var h = fnv.New64a()
-	_, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:])
-	return h.Sum64()
-}
-
-// _assert will panic with a given formatted message if the given condition is false.
-func _assert(condition bool, msg string, v ...interface{}) {
-	if !condition {
-		panic(fmt.Sprintf("assertion failed: "+msg, v...))
-	}
-}
-
-func warn(v ...interface{})              { fmt.Fprintln(os.Stderr, v...) }
-func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) }
-
-func printstack() {
-	stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n")
-	fmt.Fprintln(os.Stderr, stack)
-}
diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/db_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/db_test.go
deleted file mode 100644
index dddf22b46..000000000
--- a/Godeps/_workspace/src/github.com/boltdb/bolt/db_test.go
+++ /dev/null
@@ -1,903 +0,0 @@
-package bolt_test
-
-import (
-	"encoding/binary"
-	"errors"
-	"flag"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"regexp"
-	"runtime"
-	"sort"
-	"strings"
-	"testing"
-	"time"
-
-	"github.com/boltdb/bolt"
-)
-
-var statsFlag = flag.Bool("stats", false, "show performance stats")
-
-// Ensure that opening a database with a bad path returns an error.
-func TestOpen_BadPath(t *testing.T) {
-	db, err := bolt.Open("", 0666, nil)
-	assert(t, err != nil, "err: %s", err)
-	assert(t, db == nil, "")
-}
-
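// A sketch, not part of the original tests, of the monitoring loop that
// Stats.Sub in db.go above is designed for: snapshot the stats, wait,
// then diff to get the counters for just that window. The ten-second
// interval is an arbitrary choice.
func monitorStats(db *bolt.DB) {
	prev := db.Stats()
	for {
		time.Sleep(10 * time.Second)
		stats := db.Stats()
		diff := stats.Sub(&prev) // counters accumulated since the last snapshot
		fmt.Printf("read txs opened in window: %d\n", diff.TxN)
		prev = stats
	}
}
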
-// Ensure that a database can be opened without error.
-func TestOpen(t *testing.T) {
-	path := tempfile()
-	defer os.Remove(path)
-	db, err := bolt.Open(path, 0666, nil)
-	assert(t, db != nil, "")
-	ok(t, err)
-	equals(t, db.Path(), path)
-	ok(t, db.Close())
-}
-
-// Ensure that opening an already open database file will time out.
-func TestOpen_Timeout(t *testing.T) {
-	if runtime.GOOS == "windows" {
-		t.Skip("timeout not supported on windows")
-	}
-
-	path := tempfile()
-	defer os.Remove(path)
-
-	// Open a data file.
-	db0, err := bolt.Open(path, 0666, nil)
-	assert(t, db0 != nil, "")
-	ok(t, err)
-
-	// Attempt to open the database again.
-	start := time.Now()
-	db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 100 * time.Millisecond})
-	assert(t, db1 == nil, "")
-	equals(t, bolt.ErrTimeout, err)
-	assert(t, time.Since(start) > 100*time.Millisecond, "")
-
-	db0.Close()
-}
-
-// Ensure that opening an already open database file will wait until it's closed.
-func TestOpen_Wait(t *testing.T) {
-	if runtime.GOOS == "windows" {
-		t.Skip("timeout not supported on windows")
-	}
-
-	path := tempfile()
-	defer os.Remove(path)
-
-	// Open a data file.
-	db0, err := bolt.Open(path, 0666, nil)
-	assert(t, db0 != nil, "")
-	ok(t, err)
-
-	// Close it in just a bit.
-	time.AfterFunc(100*time.Millisecond, func() { db0.Close() })
-
-	// Attempt to open the database again.
-	start := time.Now()
-	db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 200 * time.Millisecond})
-	assert(t, db1 != nil, "")
-	ok(t, err)
-	assert(t, time.Since(start) > 100*time.Millisecond, "")
-}
-
-// Ensure that opening a database does not increase its size.
-// https://github.com/boltdb/bolt/issues/291
-func TestOpen_Size(t *testing.T) {
-	// Open a data file.
-	db := NewTestDB()
-	path := db.Path()
-	defer db.Close()
-
-	// Insert until we get above the minimum 4MB size.
-	ok(t, db.Update(func(tx *bolt.Tx) error {
-		b, _ := tx.CreateBucketIfNotExists([]byte("data"))
-		for i := 0; i < 10000; i++ {
-			ok(t, b.Put([]byte(fmt.Sprintf("%04d", i)), make([]byte, 1000)))
-		}
-		return nil
-	}))
-
-	// Close database and grab the size.
-	db.DB.Close()
-	sz := fileSize(path)
-	if sz == 0 {
-		t.Fatalf("unexpected new file size: %d", sz)
-	}
-
-	// Reopen database, update, and check size again.
-	db0, err := bolt.Open(path, 0666, nil)
-	ok(t, err)
-	ok(t, db0.Update(func(tx *bolt.Tx) error { return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}) }))
-	ok(t, db0.Close())
-	newSz := fileSize(path)
-	if newSz == 0 {
-		t.Fatalf("unexpected new file size: %d", newSz)
-	}
-
-	// Compare the original size with the new size.
-	if sz != newSz {
-		t.Fatalf("unexpected file growth: %d => %d", sz, newSz)
-	}
-}
-
-// Ensure that opening a database beyond the max step size does not increase its size.
-// https://github.com/boltdb/bolt/issues/303
-func TestOpen_Size_Large(t *testing.T) {
-	if testing.Short() {
-		t.Skip("short mode")
-	}
-
-	// Open a data file.
-	db := NewTestDB()
-	path := db.Path()
-	defer db.Close()
-
-	// Insert until we get above the minimum 4MB size.
-	var index uint64
-	for i := 0; i < 10000; i++ {
-		ok(t, db.Update(func(tx *bolt.Tx) error {
-			b, _ := tx.CreateBucketIfNotExists([]byte("data"))
-			for j := 0; j < 1000; j++ {
-				ok(t, b.Put(u64tob(index), make([]byte, 50)))
-				index++
-			}
-			return nil
-		}))
-	}
-
-	// Close database and grab the size.
-	db.DB.Close()
-	sz := fileSize(path)
-	if sz == 0 {
-		t.Fatalf("unexpected new file size: %d", sz)
-	} else if sz < (1 << 30) {
-		t.Fatalf("expected larger initial size: %d", sz)
-	}
-
-	// Reopen database, update, and check size again.
-	db0, err := bolt.Open(path, 0666, nil)
-	ok(t, err)
-	ok(t, db0.Update(func(tx *bolt.Tx) error { return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}) }))
-	ok(t, db0.Close())
-	newSz := fileSize(path)
-	if newSz == 0 {
-		t.Fatalf("unexpected new file size: %d", newSz)
-	}
-
-	// Compare the original size with the new size.
-	if sz != newSz {
-		t.Fatalf("unexpected file growth: %d => %d", sz, newSz)
-	}
-}
-
-// Ensure that a re-opened database is consistent.
-func TestOpen_Check(t *testing.T) {
-	path := tempfile()
-	defer os.Remove(path)
-
-	db, err := bolt.Open(path, 0666, nil)
-	ok(t, err)
-	ok(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() }))
-	db.Close()
-
-	db, err = bolt.Open(path, 0666, nil)
-	ok(t, err)
-	ok(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() }))
-	db.Close()
-}
-
-// Ensure that the database returns an error if the file handle cannot be opened.
-func TestDB_Open_FileError(t *testing.T) {
-	path := tempfile()
-	defer os.Remove(path)
-
-	_, err := bolt.Open(path+"/youre-not-my-real-parent", 0666, nil)
-	assert(t, err.(*os.PathError) != nil, "")
-	equals(t, path+"/youre-not-my-real-parent", err.(*os.PathError).Path)
-	equals(t, "open", err.(*os.PathError).Op)
-}
-
-// Ensure that write errors to the meta file handler during initialization are returned.
-func TestDB_Open_MetaInitWriteError(t *testing.T) {
-	t.Skip("pending")
-}
-
-// Ensure that a database that is too small returns an error.
-func TestDB_Open_FileTooSmall(t *testing.T) {
-	path := tempfile()
-	defer os.Remove(path)
-
-	db, err := bolt.Open(path, 0666, nil)
-	ok(t, err)
-	db.Close()
-
-	// Corrupt the database by truncating it below the two-page minimum.
-	ok(t, os.Truncate(path, int64(os.Getpagesize())))
-
-	db, err = bolt.Open(path, 0666, nil)
-	equals(t, errors.New("file size too small"), err)
-}
-
-// Ensure that a database can be opened in read-only mode by multiple processes
-// and that a database cannot be opened in read-write mode and in read-only
-// mode at the same time.
-func TestOpen_ReadOnly(t *testing.T) {
-	bucket, key, value := []byte(`bucket`), []byte(`key`), []byte(`value`)
-
-	path := tempfile()
-	defer os.Remove(path)
-
-	// Open in read-write mode.
-	db, err := bolt.Open(path, 0666, nil)
-	ok(t, db.Update(func(tx *bolt.Tx) error {
-		b, err := tx.CreateBucket(bucket)
-		if err != nil {
-			return err
-		}
-		return b.Put(key, value)
-	}))
-	assert(t, db != nil, "")
-	assert(t, !db.IsReadOnly(), "")
-	ok(t, err)
-	ok(t, db.Close())
-
-	// Open in read-only mode.
-	db0, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true})
-	ok(t, err)
-	defer db0.Close()
-
-	// Opening in read-write mode should return an error.
-	_, err = bolt.Open(path, 0666, &bolt.Options{Timeout: time.Millisecond * 100})
-	assert(t, err != nil, "")
-
-	// And again (in read-only mode).
-	db1, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true})
-	ok(t, err)
-	defer db1.Close()
-
-	// Verify both read-only databases are accessible.
-	for _, db := range []*bolt.DB{db0, db1} {
-		// Verify that it is indeed in read-only mode.
-		assert(t, db.IsReadOnly(), "")
-
-		// Read-only databases should not allow updates.
-		assert(t,
-			bolt.ErrDatabaseReadOnly == db.Update(func(*bolt.Tx) error {
-				panic(`should never get here`)
-			}),
-			"")
-
-		// Read-only databases should not allow beginning writable txns.
-		_, err = db.Begin(true)
-		assert(t, bolt.ErrDatabaseReadOnly == err, "")
-
-		// Verify the data.
-		ok(t, db.View(func(tx *bolt.Tx) error {
-			b := tx.Bucket(bucket)
-			if b == nil {
-				return fmt.Errorf("expected bucket `%s`", string(bucket))
-			}
-
-			got := string(b.Get(key))
-			expected := string(value)
-			if got != expected {
-				return fmt.Errorf("expected `%s`, got `%s`", expected, got)
-			}
-			return nil
-		}))
-	}
-}
-
-// TODO(benbjohnson): Test corruption at every byte of the first two pages.
-
-// Ensure that a database cannot open a transaction when it's not open.
-func TestDB_Begin_DatabaseNotOpen(t *testing.T) {
-	var db bolt.DB
-	tx, err := db.Begin(false)
-	assert(t, tx == nil, "")
-	equals(t, err, bolt.ErrDatabaseNotOpen)
-}
-
-// Ensure that a read-write transaction can be retrieved.
-func TestDB_BeginRW(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	tx, err := db.Begin(true)
-	assert(t, tx != nil, "")
-	ok(t, err)
-	assert(t, tx.DB() == db.DB, "")
-	equals(t, tx.Writable(), true)
-	ok(t, tx.Commit())
-}
-
-// Ensure that opening a transaction while the DB is closed returns an error.
-func TestDB_BeginRW_Closed(t *testing.T) {
-	var db bolt.DB
-	tx, err := db.Begin(true)
-	equals(t, err, bolt.ErrDatabaseNotOpen)
-	assert(t, tx == nil, "")
-}
-
-func TestDB_Close_PendingTx_RW(t *testing.T) { testDB_Close_PendingTx(t, true) }
-func TestDB_Close_PendingTx_RO(t *testing.T) { testDB_Close_PendingTx(t, false) }
-
-// Ensure that a database cannot close while transactions are open.
-func testDB_Close_PendingTx(t *testing.T, writable bool) {
-	db := NewTestDB()
-	defer db.Close()
-
-	// Start transaction.
-	tx, err := db.Begin(writable)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// Close the database in a separate goroutine.
-	done := make(chan struct{})
-	go func() {
-		db.Close()
-		close(done)
-	}()
-
-	// Ensure database hasn't closed.
-	time.Sleep(100 * time.Millisecond)
-	select {
-	case <-done:
-		t.Fatal("database closed too early")
-	default:
-	}
-
-	// Commit transaction.
-	if err := tx.Commit(); err != nil {
-		t.Fatal(err)
-	}
-
-	// Ensure database closed now.
-	time.Sleep(100 * time.Millisecond)
-	select {
-	case <-done:
-	default:
-		t.Fatal("database did not close")
-	}
-}
-
-// Ensure a database can provide a transactional block.
-func TestDB_Update(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	err := db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		b := tx.Bucket([]byte("widgets"))
-		b.Put([]byte("foo"), []byte("bar"))
-		b.Put([]byte("baz"), []byte("bat"))
-		b.Delete([]byte("foo"))
-		return nil
-	})
-	ok(t, err)
-	err = db.View(func(tx *bolt.Tx) error {
-		assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "")
-		equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
-		return nil
-	})
-	ok(t, err)
-}
-
-// Ensure a closed database returns an error while running a transaction block.
-func TestDB_Update_Closed(t *testing.T) {
-	var db bolt.DB
-	err := db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		return nil
-	})
-	equals(t, err, bolt.ErrDatabaseNotOpen)
-}
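
// The four Manual* tests below pin down why Commit and Rollback panic
// inside Update/View: those transactions are managed by the DB. A sketch,
// not from the bolt source, of the manual alternative for cases that need
// explicit control (the bucket name is an illustrative assumption):
func manualTxSketch(db *bolt.DB) error {
	tx, err := db.Begin(true)
	if err != nil {
		return err
	}
	// Rollback after a successful Commit returns ErrTxClosed, which is
	// safe to ignore, so this defer covers every early-return path.
	defer tx.Rollback()

	if _, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil {
		return err
	}
	return tx.Commit()
}
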
-
-// Ensure a panic occurs while trying to commit a managed transaction.
-func TestDB_Update_ManualCommit(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-
-	var ok bool
-	db.Update(func(tx *bolt.Tx) error {
-		func() {
-			defer func() {
-				if r := recover(); r != nil {
-					ok = true
-				}
-			}()
-			tx.Commit()
-		}()
-		return nil
-	})
-	assert(t, ok, "expected panic")
-}
-
-// Ensure a panic occurs while trying to rollback a managed transaction.
-func TestDB_Update_ManualRollback(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-
-	var ok bool
-	db.Update(func(tx *bolt.Tx) error {
-		func() {
-			defer func() {
-				if r := recover(); r != nil {
-					ok = true
-				}
-			}()
-			tx.Rollback()
-		}()
-		return nil
-	})
-	assert(t, ok, "expected panic")
-}
-
-// Ensure a panic occurs while trying to commit a managed transaction.
-func TestDB_View_ManualCommit(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-
-	var ok bool
-	db.View(func(tx *bolt.Tx) error {
-		func() {
-			defer func() {
-				if r := recover(); r != nil {
-					ok = true
-				}
-			}()
-			tx.Commit()
-		}()
-		return nil
-	})
-	assert(t, ok, "expected panic")
-}
-
-// Ensure a panic occurs while trying to rollback a managed transaction.
-func TestDB_View_ManualRollback(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-
-	var ok bool
-	db.View(func(tx *bolt.Tx) error {
-		func() {
-			defer func() {
-				if r := recover(); r != nil {
-					ok = true
-				}
-			}()
-			tx.Rollback()
-		}()
-		return nil
-	})
-	assert(t, ok, "expected panic")
-}
-
-// Ensure a write transaction that panics does not hold open locks.
-func TestDB_Update_Panic(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-
-	func() {
-		defer func() {
-			if r := recover(); r != nil {
-				t.Log("recover: update", r)
-			}
-		}()
-		db.Update(func(tx *bolt.Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			panic("omg")
-		})
-	}()
-
-	// Verify we can update again.
-	err := db.Update(func(tx *bolt.Tx) error {
-		_, err := tx.CreateBucket([]byte("widgets"))
-		return err
-	})
-	ok(t, err)
-
-	// Verify that our change persisted.
-	err = db.Update(func(tx *bolt.Tx) error {
-		assert(t, tx.Bucket([]byte("widgets")) != nil, "")
-		return nil
-	})
-}
-
-// Ensure a database can return an error through a read-only transactional block.
-func TestDB_View_Error(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	err := db.View(func(tx *bolt.Tx) error {
-		return errors.New("xxx")
-	})
-	equals(t, errors.New("xxx"), err)
-}
-
-// Ensure a read transaction that panics does not hold open locks.
-func TestDB_View_Panic(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		return nil
-	})
-
-	func() {
-		defer func() {
-			if r := recover(); r != nil {
-				t.Log("recover: view", r)
-			}
-		}()
-		db.View(func(tx *bolt.Tx) error {
-			assert(t, tx.Bucket([]byte("widgets")) != nil, "")
-			panic("omg")
-		})
-	}()
-
-	// Verify that we can still use read transactions.
-	db.View(func(tx *bolt.Tx) error {
-		assert(t, tx.Bucket([]byte("widgets")) != nil, "")
-		return nil
-	})
-}
-
-// Ensure that an error is returned when a database write fails.
-func TestDB_Commit_WriteFail(t *testing.T) {
-	t.Skip("pending") // TODO(benbjohnson)
-}
-
-// Ensure that DB stats can be returned.
-func TestDB_Stats(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		_, err := tx.CreateBucket([]byte("widgets"))
-		return err
-	})
-	stats := db.Stats()
-	equals(t, 2, stats.TxStats.PageCount)
-	equals(t, 0, stats.FreePageN)
-	equals(t, 2, stats.PendingPageN)
-}
-
-// Ensure that database pages are in expected order and type.
-func TestDB_Consistency(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	db.Update(func(tx *bolt.Tx) error {
-		_, err := tx.CreateBucket([]byte("widgets"))
-		return err
-	})
-
-	for i := 0; i < 10; i++ {
-		db.Update(func(tx *bolt.Tx) error {
-			ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
-			return nil
-		})
-	}
-	db.Update(func(tx *bolt.Tx) error {
-		p, _ := tx.Page(0)
-		assert(t, p != nil, "")
-		equals(t, "meta", p.Type)
-
-		p, _ = tx.Page(1)
-		assert(t, p != nil, "")
-		equals(t, "meta", p.Type)
-
-		p, _ = tx.Page(2)
-		assert(t, p != nil, "")
-		equals(t, "free", p.Type)
-
-		p, _ = tx.Page(3)
-		assert(t, p != nil, "")
-		equals(t, "free", p.Type)
-
-		p, _ = tx.Page(4)
-		assert(t, p != nil, "")
-		equals(t, "leaf", p.Type)
-
-		p, _ = tx.Page(5)
-		assert(t, p != nil, "")
-		equals(t, "freelist", p.Type)
-
-		p, _ = tx.Page(6)
-		assert(t, p == nil, "")
-		return nil
-	})
-}
-
-// Ensure that DB stats can be subtracted from one another.
-func TestDBStats_Sub(t *testing.T) {
-	var a, b bolt.Stats
-	a.TxStats.PageCount = 3
-	a.FreePageN = 4
-	b.TxStats.PageCount = 10
-	b.FreePageN = 14
-	diff := b.Sub(&a)
-	equals(t, 7, diff.TxStats.PageCount)
-	// free page stats are copied from the receiver and not subtracted
-	equals(t, 14, diff.FreePageN)
-}
-
-func ExampleDB_Update() {
-	// Open the database.
-	db, _ := bolt.Open(tempfile(), 0666, nil)
-	defer os.Remove(db.Path())
-	defer db.Close()
-
-	// Execute several commands within a write transaction.
-	err := db.Update(func(tx *bolt.Tx) error {
-		b, err := tx.CreateBucket([]byte("widgets"))
-		if err != nil {
-			return err
-		}
-		if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
-			return err
-		}
-		return nil
-	})
-
-	// If our transactional block didn't return an error then our data is saved.
-	if err == nil {
-		db.View(func(tx *bolt.Tx) error {
-			value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
-			fmt.Printf("The value of 'foo' is: %s\n", value)
-			return nil
-		})
-	}
-
-	// Output:
-	// The value of 'foo' is: bar
-}
-
-func ExampleDB_View() {
-	// Open the database.
-	db, _ := bolt.Open(tempfile(), 0666, nil)
-	defer os.Remove(db.Path())
-	defer db.Close()
-
-	// Insert data into a bucket.
-	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("people"))
-		b := tx.Bucket([]byte("people"))
-		b.Put([]byte("john"), []byte("doe"))
-		b.Put([]byte("susy"), []byte("que"))
-		return nil
-	})
-
-	// Access data from within a read-only transactional block.
-	db.View(func(tx *bolt.Tx) error {
-		v := tx.Bucket([]byte("people")).Get([]byte("john"))
-		fmt.Printf("John's last name is %s.\n", v)
-		return nil
-	})
-
-	// Output:
-	// John's last name is doe.
-}
-
-func ExampleDB_Begin_ReadOnly() {
-	// Open the database.
-	db, _ := bolt.Open(tempfile(), 0666, nil)
-	defer os.Remove(db.Path())
-	defer db.Close()
-
-	// Create a bucket.
-	db.Update(func(tx *bolt.Tx) error {
-		_, err := tx.CreateBucket([]byte("widgets"))
-		return err
-	})
-
-	// Create several keys in a transaction.
-	tx, _ := db.Begin(true)
-	b := tx.Bucket([]byte("widgets"))
-	b.Put([]byte("john"), []byte("blue"))
-	b.Put([]byte("abby"), []byte("red"))
-	b.Put([]byte("zephyr"), []byte("purple"))
-	tx.Commit()
-
-	// Iterate over the values in sorted key order.
-	tx, _ = db.Begin(false)
-	c := tx.Bucket([]byte("widgets")).Cursor()
-	for k, v := c.First(); k != nil; k, v = c.Next() {
-		fmt.Printf("%s likes %s\n", k, v)
-	}
-	tx.Rollback()
-
-	// Output:
-	// abby likes red
-	// john likes blue
-	// zephyr likes purple
-}
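
// A sketch tying together the Options fields exercised by TestOpen_Timeout
// and TestOpen_ReadOnly above; the timeout value is an arbitrary choice.
func openReadOnlyWithTimeout(path string) (*bolt.DB, error) {
	return bolt.Open(path, 0666, &bolt.Options{
		Timeout:  500 * time.Millisecond, // fail instead of waiting forever for the file lock
		ReadOnly: true,                   // shared lock; Update and Begin(true) return ErrDatabaseReadOnly
	})
}
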
-	tx, _ := db.Begin(true)
-	b := tx.Bucket([]byte("widgets"))
-	b.Put([]byte("john"), []byte("blue"))
-	b.Put([]byte("abby"), []byte("red"))
-	b.Put([]byte("zephyr"), []byte("purple"))
-	tx.Commit()
-
-	// Iterate over the values in sorted key order.
-	tx, _ = db.Begin(false)
-	c := tx.Bucket([]byte("widgets")).Cursor()
-	for k, v := c.First(); k != nil; k, v = c.Next() {
-		fmt.Printf("%s likes %s\n", k, v)
-	}
-	tx.Rollback()
-
-	// Output:
-	// abby likes red
-	// john likes blue
-	// zephyr likes purple
-}
-
-// TestDB represents a wrapper around a Bolt DB to handle temporary file
-// creation and automatic cleanup on close.
-type TestDB struct {
-	*bolt.DB
-}
-
-// NewTestDB returns a new instance of TestDB.
-func NewTestDB() *TestDB {
-	db, err := bolt.Open(tempfile(), 0666, nil)
-	if err != nil {
-		panic("cannot open db: " + err.Error())
-	}
-	return &TestDB{db}
-}
-
-// MustView executes a read-only function. Panics on error.
-func (db *TestDB) MustView(fn func(tx *bolt.Tx) error) {
-	if err := db.DB.View(func(tx *bolt.Tx) error {
-		return fn(tx)
-	}); err != nil {
-		panic(err.Error())
-	}
-}
-
-// MustUpdate executes a read-write function. Panics on error.
-func (db *TestDB) MustUpdate(fn func(tx *bolt.Tx) error) {
-	if err := db.DB.Update(func(tx *bolt.Tx) error {
-		return fn(tx)
-	}); err != nil {
-		panic(err.Error())
-	}
-}
-
-// MustCreateBucket creates a new bucket. Panics on error.
-func (db *TestDB) MustCreateBucket(name []byte) {
-	if err := db.Update(func(tx *bolt.Tx) error {
-		_, err := tx.CreateBucket(name)
-		return err
-	}); err != nil {
-		panic(err.Error())
-	}
-}
-
-// Close closes the database and deletes the underlying file.
-func (db *TestDB) Close() {
-	// Log statistics.
-	if *statsFlag {
-		db.PrintStats()
-	}
-
-	// Check database consistency after every test.
-	db.MustCheck()
-
-	// Close database and remove file.
-	defer os.Remove(db.Path())
-	db.DB.Close()
-}
-
-// PrintStats prints the database stats.
-func (db *TestDB) PrintStats() {
-	var stats = db.Stats()
-	fmt.Printf("[db] %-20s %-20s %-20s\n",
-		fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc),
-		fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount),
-		fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref),
-	)
-	fmt.Printf("     %-20s %-20s %-20s\n",
-		fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)),
-		fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)),
-		fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)),
-	)
-}
-
-// MustCheck runs a consistency check on the database and panics if any errors are found.
-func (db *TestDB) MustCheck() {
-	db.Update(func(tx *bolt.Tx) error {
-		// Collect all the errors.
-		var errors []error
-		for err := range tx.Check() {
-			errors = append(errors, err)
-			if len(errors) > 10 {
-				break
-			}
-		}
-
-		// If errors occurred, copy the DB and print the errors.
-		if len(errors) > 0 {
-			var path = tempfile()
-			tx.CopyFile(path, 0600)
-
-			// Print errors.
-			fmt.Print("\n\n")
-			fmt.Printf("consistency check failed (%d errors)\n", len(errors))
-			for _, err := range errors {
-				fmt.Println(err)
-			}
-			fmt.Println("")
-			fmt.Println("db saved to:")
-			fmt.Println(path)
-			fmt.Print("\n\n")
-			os.Exit(-1)
-		}
-
-		return nil
-	})
-}
-
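Tx.Check() streams any corruption it finds over a channel and closes the channel when the scan completes, which is why MustCheck above can simply range over it. A minimal, editorially added consumer for non-test code (the logging destination is an arbitrary choice):

	db.View(func(tx *bolt.Tx) error {
		for err := range tx.Check() {
			log.Println("consistency error:", err)
		}
		return nil
	})

-// CopyTempFile copies a database to a temporary file.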
-func (db *TestDB) CopyTempFile() { - path := tempfile() - db.View(func(tx *bolt.Tx) error { return tx.CopyFile(path, 0600) }) - fmt.Println("db copied to: ", path) -} - -// tempfile returns a temporary file path. -func tempfile() string { - f, _ := ioutil.TempFile("", "bolt-") - f.Close() - os.Remove(f.Name()) - return f.Name() -} - -// mustContainKeys checks that a bucket contains a given set of keys. -func mustContainKeys(b *bolt.Bucket, m map[string]string) { - found := make(map[string]string) - b.ForEach(func(k, _ []byte) error { - found[string(k)] = "" - return nil - }) - - // Check for keys found in bucket that shouldn't be there. - var keys []string - for k, _ := range found { - if _, ok := m[string(k)]; !ok { - keys = append(keys, k) - } - } - if len(keys) > 0 { - sort.Strings(keys) - panic(fmt.Sprintf("keys found(%d): %s", len(keys), strings.Join(keys, ","))) - } - - // Check for keys not found in bucket that should be there. - for k, _ := range m { - if _, ok := found[string(k)]; !ok { - keys = append(keys, k) - } - } - if len(keys) > 0 { - sort.Strings(keys) - panic(fmt.Sprintf("keys not found(%d): %s", len(keys), strings.Join(keys, ","))) - } -} - -func trunc(b []byte, length int) []byte { - if length < len(b) { - return b[:length] - } - return b -} - -func truncDuration(d time.Duration) string { - return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1") -} - -func fileSize(path string) int64 { - fi, err := os.Stat(path) - if err != nil { - return 0 - } - return fi.Size() -} - -func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } -func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } - -// u64tob converts a uint64 into an 8-byte slice. -func u64tob(v uint64) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, v) - return b -} - -// btou64 converts an 8-byte slice into an uint64. -func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) } diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/doc.go b/Godeps/_workspace/src/github.com/boltdb/bolt/doc.go deleted file mode 100644 index cc937845d..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Package bolt implements a low-level key/value store in pure Go. It supports -fully serializable transactions, ACID semantics, and lock-free MVCC with -multiple readers and a single writer. Bolt can be used for projects that -want a simple data store without the need to add large dependencies such as -Postgres or MySQL. - -Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is -optimized for fast read access and does not require recovery in the event of a -system crash. Transactions which have not finished committing will simply be -rolled back in the event of a crash. - -The design of Bolt is based on Howard Chu's LMDB database project. - -Bolt currently works on Windows, Mac OS X, and Linux. - - -Basics - -There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is -a collection of buckets and is represented by a single file on disk. A bucket is -a collection of unique keys that are associated with values. - -Transactions provide either read-only or read-write access to the database. -Read-only transactions can retrieve key/value pairs and can use Cursors to -iterate over the dataset sequentially. Read-write transactions can create and -delete buckets and can insert and remove keys. Only one read-write transaction -is allowed at a time. 
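To make the package overview above concrete, here is a minimal, editorially added usage sketch (not part of the original file; the path and bucket/key names are illustrative):

	package main

	import (
		"fmt"
		"log"

		"github.com/boltdb/bolt"
	)

	func main() {
		// One DB is one file on disk; Open creates it if needed.
		db, err := bolt.Open("my.db", 0600, nil)
		if err != nil {
			log.Fatal(err)
		}
		defer db.Close()

		// Read-write transaction: create a bucket and insert a key.
		if err := db.Update(func(tx *bolt.Tx) error {
			b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
			if err != nil {
				return err
			}
			return b.Put([]byte("foo"), []byte("bar"))
		}); err != nil {
			log.Fatal(err)
		}

		// Read-only transaction: look the key back up.
		db.View(func(tx *bolt.Tx) error {
			fmt.Printf("foo=%s\n", tx.Bucket([]byte("widgets")).Get([]byte("foo")))
			return nil
		})
	}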
-
-
-Caveats
-
-The database uses a read-only, memory-mapped data file to ensure that
-applications cannot corrupt the database; however, this means that keys and
-values returned from Bolt cannot be changed. Writing to a read-only byte
-slice will cause Go to panic.
-
-Keys and values retrieved from the database are only valid for the life of
-the transaction. When used outside the transaction, these byte slices can
-point to different data or can point to invalid memory, which will cause a panic.
-
-
-*/
-package bolt
diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/errors.go b/Godeps/_workspace/src/github.com/boltdb/bolt/errors.go
deleted file mode 100644
index 6883786d5..000000000
--- a/Godeps/_workspace/src/github.com/boltdb/bolt/errors.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package bolt
-
-import "errors"
-
-// These errors can be returned when opening or calling methods on a DB.
-var (
-	// ErrDatabaseNotOpen is returned when a DB instance is accessed before it
-	// is opened or after it is closed.
-	ErrDatabaseNotOpen = errors.New("database not open")
-
-	// ErrDatabaseOpen is returned when opening a database that is
-	// already open.
-	ErrDatabaseOpen = errors.New("database already open")
-
-	// ErrInvalid is returned when a data file is not a Bolt-formatted database.
-	ErrInvalid = errors.New("invalid database")
-
-	// ErrVersionMismatch is returned when the data file was created with a
-	// different version of Bolt.
-	ErrVersionMismatch = errors.New("version mismatch")
-
-	// ErrChecksum is returned when either meta page checksum does not match.
-	ErrChecksum = errors.New("checksum error")
-
-	// ErrTimeout is returned when a database cannot obtain an exclusive lock
-	// on the data file after the timeout passed to Open().
-	ErrTimeout = errors.New("timeout")
-)
-
-// These errors can occur when beginning or committing a Tx.
-var (
-	// ErrTxNotWritable is returned when performing a write operation on a
-	// read-only transaction.
-	ErrTxNotWritable = errors.New("tx not writable")
-
-	// ErrTxClosed is returned when committing or rolling back a transaction
-	// that has already been committed or rolled back.
-	ErrTxClosed = errors.New("tx closed")
-
-	// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
-	// read-only database.
-	ErrDatabaseReadOnly = errors.New("database is in read-only mode")
-)
-
-// These errors can occur when putting or deleting a value or a bucket.
-var (
-	// ErrBucketNotFound is returned when trying to access a bucket that has
-	// not been created yet.
-	ErrBucketNotFound = errors.New("bucket not found")
-
-	// ErrBucketExists is returned when creating a bucket that already exists.
-	ErrBucketExists = errors.New("bucket already exists")
-
-	// ErrBucketNameRequired is returned when creating a bucket with a blank name.
-	ErrBucketNameRequired = errors.New("bucket name required")
-
-	// ErrKeyRequired is returned when inserting a zero-length key.
-	ErrKeyRequired = errors.New("key required")
-
-	// ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
-	ErrKeyTooLarge = errors.New("key too large")
-
-	// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
-	ErrValueTooLarge = errors.New("value too large")
-
-	// ErrIncompatibleValue is returned when trying to create or delete a bucket
-	// on an existing non-bucket key or when trying to create or delete a
-	// non-bucket key on an existing bucket key.
- ErrIncompatibleValue = errors.New("incompatible value") -) diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/freelist.go b/Godeps/_workspace/src/github.com/boltdb/bolt/freelist.go deleted file mode 100644 index 0161948fc..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/freelist.go +++ /dev/null @@ -1,242 +0,0 @@ -package bolt - -import ( - "fmt" - "sort" - "unsafe" -) - -// freelist represents a list of all pages that are available for allocation. -// It also tracks pages that have been freed but are still in use by open transactions. -type freelist struct { - ids []pgid // all free and available free page ids. - pending map[txid][]pgid // mapping of soon-to-be free page ids by tx. - cache map[pgid]bool // fast lookup of all free and pending page ids. -} - -// newFreelist returns an empty, initialized freelist. -func newFreelist() *freelist { - return &freelist{ - pending: make(map[txid][]pgid), - cache: make(map[pgid]bool), - } -} - -// size returns the size of the page after serialization. -func (f *freelist) size() int { - return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count()) -} - -// count returns count of pages on the freelist -func (f *freelist) count() int { - return f.free_count() + f.pending_count() -} - -// free_count returns count of free pages -func (f *freelist) free_count() int { - return len(f.ids) -} - -// pending_count returns count of pending pages -func (f *freelist) pending_count() int { - var count int - for _, list := range f.pending { - count += len(list) - } - return count -} - -// all returns a list of all free ids and all pending ids in one sorted list. -func (f *freelist) all() []pgid { - m := make(pgids, 0) - - for _, list := range f.pending { - m = append(m, list...) - } - - sort.Sort(m) - return pgids(f.ids).merge(m) -} - -// allocate returns the starting page id of a contiguous list of pages of a given size. -// If a contiguous block cannot be found then 0 is returned. -func (f *freelist) allocate(n int) pgid { - if len(f.ids) == 0 { - return 0 - } - - var initial, previd pgid - for i, id := range f.ids { - if id <= 1 { - panic(fmt.Sprintf("invalid page allocation: %d", id)) - } - - // Reset initial page if this is not contiguous. - if previd == 0 || id-previd != 1 { - initial = id - } - - // If we found a contiguous block then remove it and return it. - if (id-initial)+1 == pgid(n) { - // If we're allocating off the beginning then take the fast path - // and just adjust the existing slice. This will use extra memory - // temporarily but the append() in free() will realloc the slice - // as is necessary. - if (i + 1) == n { - f.ids = f.ids[i+1:] - } else { - copy(f.ids[i-n+1:], f.ids[i+1:]) - f.ids = f.ids[:len(f.ids)-n] - } - - // Remove from the free cache. - for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, initial+i) - } - - return initial - } - - previd = id - } - return 0 -} - -// free releases a page and its overflow for a given transaction id. -// If the page is already free then a panic will occur. -func (f *freelist) free(txid txid, p *page) { - if p.id <= 1 { - panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) - } - - // Free page and all its overflow pages. - var ids = f.pending[txid] - for id := p.id; id <= p.id+pgid(p.overflow); id++ { - // Verify that page is not already free. - if f.cache[id] { - panic(fmt.Sprintf("page %d already freed", id)) - } - - // Add to the freelist and cache. 
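		// Editorial note (not from the original source): pages freed here are
		// only pending: they stay keyed by the freeing transaction's id so
		// that still-open readers can keep using them, and only a later call
		// to release(txid) merges them into f.ids for reuse. A sketch of the
		// lifecycle, assuming a freshly constructed freelist:
		//
		//	f := newFreelist()
		//	f.free(100, &page{id: 12, overflow: 1}) // pending[100] == [12 13]
		//	f.release(100)                          // now f.ids == [12 13]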
- ids = append(ids, id) - f.cache[id] = true - } - f.pending[txid] = ids -} - -// release moves all page ids for a transaction id (or older) to the freelist. -func (f *freelist) release(txid txid) { - m := make(pgids, 0) - for tid, ids := range f.pending { - if tid <= txid { - // Move transaction's pending pages to the available freelist. - // Don't remove from the cache since the page is still free. - m = append(m, ids...) - delete(f.pending, tid) - } - } - sort.Sort(m) - f.ids = pgids(f.ids).merge(m) -} - -// rollback removes the pages from a given pending tx. -func (f *freelist) rollback(txid txid) { - // Remove page ids from cache. - for _, id := range f.pending[txid] { - delete(f.cache, id) - } - - // Remove pages from pending list. - delete(f.pending, txid) -} - -// freed returns whether a given page is in the free list. -func (f *freelist) freed(pgid pgid) bool { - return f.cache[pgid] -} - -// read initializes the freelist from a freelist page. -func (f *freelist) read(p *page) { - // If the page.count is at the max uint16 value (64k) then it's considered - // an overflow and the size of the freelist is stored as the first element. - idx, count := 0, int(p.count) - if count == 0xFFFF { - idx = 1 - count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) - } - - // Copy the list of page ids from the freelist. - ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count] - f.ids = make([]pgid, len(ids)) - copy(f.ids, ids) - - // Make sure they're sorted. - sort.Sort(pgids(f.ids)) - - // Rebuild the page cache. - f.reindex() -} - -// write writes the page ids onto a freelist page. All free and pending ids are -// saved to disk since in the event of a program crash, all pending ids will -// become free. -func (f *freelist) write(p *page) error { - // Combine the old free pgids and pgids waiting on an open transaction. - ids := f.all() - - // Update the header flag. - p.flags |= freelistPageFlag - - // The page.count can only hold up to 64k elements so if we overflow that - // number then we handle it by putting the size in the first element. - if len(ids) < 0xFFFF { - p.count = uint16(len(ids)) - copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids) - } else { - p.count = 0xFFFF - ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids)) - copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids) - } - - return nil -} - -// reload reads the freelist from a page and filters out pending items. -func (f *freelist) reload(p *page) { - f.read(p) - - // Build a cache of only pending pages. - pcache := make(map[pgid]bool) - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []pgid - for _, id := range f.ids { - if !pcache[id] { - a = append(a, id) - } - } - f.ids = a - - // Once the available list is rebuilt then rebuild the free cache so that - // it includes the available and pending free pages. - f.reindex() -} - -// reindex rebuilds the free cache based on available and pending free lists. 
-func (f *freelist) reindex() { - f.cache = make(map[pgid]bool) - for _, id := range f.ids { - f.cache[id] = true - } - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { - f.cache[pendingID] = true - } - } -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/freelist_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/freelist_test.go deleted file mode 100644 index 8caeab2ec..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/freelist_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package bolt - -import ( - "math/rand" - "reflect" - "sort" - "testing" - "unsafe" -) - -// Ensure that a page is added to a transaction's freelist. -func TestFreelist_free(t *testing.T) { - f := newFreelist() - f.free(100, &page{id: 12}) - if !reflect.DeepEqual([]pgid{12}, f.pending[100]) { - t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100]) - } -} - -// Ensure that a page and its overflow is added to a transaction's freelist. -func TestFreelist_free_overflow(t *testing.T) { - f := newFreelist() - f.free(100, &page{id: 12, overflow: 3}) - if exp := []pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100]) { - t.Fatalf("exp=%v; got=%v", exp, f.pending[100]) - } -} - -// Ensure that a transaction's free pages can be released. -func TestFreelist_release(t *testing.T) { - f := newFreelist() - f.free(100, &page{id: 12, overflow: 1}) - f.free(100, &page{id: 9}) - f.free(102, &page{id: 39}) - f.release(100) - f.release(101) - if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } - - f.release(102) - if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } -} - -// Ensure that a freelist can find contiguous blocks of pages. -func TestFreelist_allocate(t *testing.T) { - f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}} - if id := int(f.allocate(3)); id != 3 { - t.Fatalf("exp=3; got=%v", id) - } - if id := int(f.allocate(1)); id != 6 { - t.Fatalf("exp=6; got=%v", id) - } - if id := int(f.allocate(3)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if id := int(f.allocate(2)); id != 12 { - t.Fatalf("exp=12; got=%v", id) - } - if id := int(f.allocate(1)); id != 7 { - t.Fatalf("exp=7; got=%v", id) - } - if id := int(f.allocate(0)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if id := int(f.allocate(0)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if exp := []pgid{9, 18}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } - - if id := int(f.allocate(1)); id != 9 { - t.Fatalf("exp=9; got=%v", id) - } - if id := int(f.allocate(1)); id != 18 { - t.Fatalf("exp=18; got=%v", id) - } - if id := int(f.allocate(1)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if exp := []pgid{}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } -} - -// Ensure that a freelist can deserialize from a freelist page. -func TestFreelist_read(t *testing.T) { - // Create a page. - var buf [4096]byte - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = freelistPageFlag - page.count = 2 - - // Insert 2 page ids. - ids := (*[3]pgid)(unsafe.Pointer(&page.ptr)) - ids[0] = 23 - ids[1] = 50 - - // Deserialize page into a freelist. - f := newFreelist() - f.read(page) - - // Ensure that there are two page ids in the freelist. - if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } -} - -// Ensure that a freelist can serialize into a freelist page. 
-func TestFreelist_write(t *testing.T) {
-	// Create a freelist and write it to a page.
-	var buf [4096]byte
-	f := &freelist{ids: []pgid{12, 39}, pending: make(map[txid][]pgid)}
-	f.pending[100] = []pgid{28, 11}
-	f.pending[101] = []pgid{3}
-	p := (*page)(unsafe.Pointer(&buf[0]))
-	f.write(p)
-
-	// Read the page back out.
-	f2 := newFreelist()
-	f2.read(p)
-
-	// Ensure that the freelist is correct.
-	// All pages, free and pending alike, should be present and sorted in ascending order.
-	if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.ids) {
-		t.Fatalf("exp=%v; got=%v", exp, f2.ids)
-	}
-}
-
-func Benchmark_FreelistRelease10K(b *testing.B)    { benchmark_FreelistRelease(b, 10000) }
-func Benchmark_FreelistRelease100K(b *testing.B)   { benchmark_FreelistRelease(b, 100000) }
-func Benchmark_FreelistRelease1000K(b *testing.B)  { benchmark_FreelistRelease(b, 1000000) }
-func Benchmark_FreelistRelease10000K(b *testing.B) { benchmark_FreelistRelease(b, 10000000) }
-
-func benchmark_FreelistRelease(b *testing.B, size int) {
-	ids := randomPgids(size)
-	pending := randomPgids(len(ids) / 400)
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		f := &freelist{ids: ids, pending: map[txid][]pgid{1: pending}}
-		f.release(1)
-	}
-}
-
-func randomPgids(n int) []pgid {
-	rand.Seed(42)
-	pgids := make(pgids, n)
-	for i := range pgids {
-		pgids[i] = pgid(rand.Int63())
-	}
-	sort.Sort(pgids)
-	return pgids
-}
diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/node.go b/Godeps/_workspace/src/github.com/boltdb/bolt/node.go
deleted file mode 100644
index c9fb21c73..000000000
--- a/Godeps/_workspace/src/github.com/boltdb/bolt/node.go
+++ /dev/null
@@ -1,636 +0,0 @@
-package bolt
-
-import (
-	"bytes"
-	"fmt"
-	"sort"
-	"unsafe"
-)
-
-// node represents an in-memory, deserialized page.
-type node struct {
-	bucket     *Bucket
-	isLeaf     bool
-	unbalanced bool
-	spilled    bool
-	key        []byte
-	pgid       pgid
-	parent     *node
-	children   nodes
-	inodes     inodes
-}
-
-// root returns the top-level node this node is attached to.
-func (n *node) root() *node {
-	if n.parent == nil {
-		return n
-	}
-	return n.parent.root()
-}
-
-// minKeys returns the minimum number of inodes this node should have.
-func (n *node) minKeys() int {
-	if n.isLeaf {
-		return 1
-	}
-	return 2
-}
-
-// size returns the size of the node after serialization.
-func (n *node) size() int {
-	sz, elsz := pageHeaderSize, n.pageElementSize()
-	for i := 0; i < len(n.inodes); i++ {
-		item := &n.inodes[i]
-		sz += elsz + len(item.key) + len(item.value)
-	}
-	return sz
-}
-
-// sizeLessThan returns true if the node is less than a given size.
-// This is an optimization to avoid calculating a large node when we only need
-// to know if it fits inside a certain page size.
-func (n *node) sizeLessThan(v int) bool {
-	sz, elsz := pageHeaderSize, n.pageElementSize()
-	for i := 0; i < len(n.inodes); i++ {
-		item := &n.inodes[i]
-		sz += elsz + len(item.key) + len(item.value)
-		if sz >= v {
-			return false
-		}
-	}
-	return true
-}
-
-// pageElementSize returns the size of each page element based on the type of node.
-func (n *node) pageElementSize() int {
-	if n.isLeaf {
-		return leafPageElementSize
-	}
-	return branchPageElementSize
-}
-
-// childAt returns the child node at a given index.
-func (n *node) childAt(index int) *node {
-	if n.isLeaf {
-		panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
-	}
-	return n.bucket.node(n.inodes[index].pgid, n)
-}
-
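// Editorial note (not from the original source): the lookups below lean on
// sort.Search, which returns the smallest index whose predicate is true,
// i.e. the first inode key that sorts >= the key being sought. For example:
//
//	keys := [][]byte{[]byte("a"), []byte("c"), []byte("e")}
//	i := sort.Search(len(keys), func(i int) bool {
//		return bytes.Compare(keys[i], []byte("c")) != -1
//	})
//	// i == 1
//
-// childIndex returns the index of a given child node.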
-func (n *node) childIndex(child *node) int { - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) - return index -} - -// numChildren returns the number of children. -func (n *node) numChildren() int { - return len(n.inodes) -} - -// nextSibling returns the next node with the same parent. -func (n *node) nextSibling() *node { - if n.parent == nil { - return nil - } - index := n.parent.childIndex(n) - if index >= n.parent.numChildren()-1 { - return nil - } - return n.parent.childAt(index + 1) -} - -// prevSibling returns the previous node with the same parent. -func (n *node) prevSibling() *node { - if n.parent == nil { - return nil - } - index := n.parent.childIndex(n) - if index == 0 { - return nil - } - return n.parent.childAt(index - 1) -} - -// put inserts a key/value. -func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { - if pgid >= n.bucket.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) - } else if len(oldKey) <= 0 { - panic("put: zero-length old key") - } else if len(newKey) <= 0 { - panic("put: zero-length new key") - } - - // Find insertion index. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) - - // Add capacity and shift nodes if we don't have an exact match and need to insert. - exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) - if !exact { - n.inodes = append(n.inodes, inode{}) - copy(n.inodes[index+1:], n.inodes[index:]) - } - - inode := &n.inodes[index] - inode.flags = flags - inode.key = newKey - inode.value = value - inode.pgid = pgid - _assert(len(inode.key) > 0, "put: zero-length inode key") -} - -// del removes a key from the node. -func (n *node) del(key []byte) { - // Find index of key. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) - - // Exit if the key isn't found. - if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { - return - } - - // Delete inode from the node. - n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) - - // Mark the node as needing rebalancing. - n.unbalanced = true -} - -// read initializes the node from a page. -func (n *node) read(p *page) { - n.pgid = p.id - n.isLeaf = ((p.flags & leafPageFlag) != 0) - n.inodes = make(inodes, int(p.count)) - - for i := 0; i < int(p.count); i++ { - inode := &n.inodes[i] - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - inode.flags = elem.flags - inode.key = elem.key() - inode.value = elem.value() - } else { - elem := p.branchPageElement(uint16(i)) - inode.pgid = elem.pgid - inode.key = elem.key() - } - _assert(len(inode.key) > 0, "read: zero-length inode key") - } - - // Save first key so we can find the node in the parent when we spill. - if len(n.inodes) > 0 { - n.key = n.inodes[0].key - _assert(len(n.key) > 0, "read: zero-length node key") - } else { - n.key = nil - } -} - -// write writes the items onto one or more pages. -func (n *node) write(p *page) { - // Initialize page. - if n.isLeaf { - p.flags |= leafPageFlag - } else { - p.flags |= branchPageFlag - } - - if len(n.inodes) >= 0xFFFF { - panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) - } - p.count = uint16(len(n.inodes)) - - // Loop over each item and write it to the page. 
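	// Editorial note (not from the original source): the serialized layout
	// produced below is [page header][element headers][key/value data]. Each
	// fixed-size element header is written at the front of the page, its key
	// and value bytes are appended to the data region that starts after the
	// last header, and elem.pos records the offset from the header to its data.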
-	b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
-	for i, item := range n.inodes {
-		_assert(len(item.key) > 0, "write: zero-length inode key")
-
-		// Write the page element.
-		if n.isLeaf {
-			elem := p.leafPageElement(uint16(i))
-			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
-			elem.flags = item.flags
-			elem.ksize = uint32(len(item.key))
-			elem.vsize = uint32(len(item.value))
-		} else {
-			elem := p.branchPageElement(uint16(i))
-			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
-			elem.ksize = uint32(len(item.key))
-			elem.pgid = item.pgid
-			_assert(elem.pgid != p.id, "write: circular dependency occurred")
-		}
-
-		// If the length of key+value is larger than the max allocation size
-		// then we need to reallocate the byte array pointer.
-		//
-		// See: https://github.com/boltdb/bolt/pull/335
-		klen, vlen := len(item.key), len(item.value)
-		if len(b) < klen+vlen {
-			b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
-		}
-
-		// Write data for the element to the end of the page.
-		copy(b[0:], item.key)
-		b = b[klen:]
-		copy(b[0:], item.value)
-		b = b[vlen:]
-	}
-
-	// DEBUG ONLY: n.dump()
-}
-
-// split breaks up a node into multiple smaller nodes, if appropriate.
-// This should only be called from the spill() function.
-func (n *node) split(pageSize int) []*node {
-	var nodes []*node
-
-	node := n
-	for {
-		// Split node into two.
-		a, b := node.splitTwo(pageSize)
-		nodes = append(nodes, a)
-
-		// If we can't split then exit the loop.
-		if b == nil {
-			break
-		}
-
-		// Set node to b so it gets split on the next iteration.
-		node = b
-	}
-
-	return nodes
-}
-
-// splitTwo breaks up a node into two smaller nodes, if appropriate.
-// This should only be called from the split() function.
-func (n *node) splitTwo(pageSize int) (*node, *node) {
-	// Ignore the split if the page doesn't have at least enough nodes for
-	// two pages or if the nodes can fit in a single page.
-	if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
-		return n, nil
-	}
-
-	// Determine the threshold before starting a new node.
-	var fillPercent = n.bucket.FillPercent
-	if fillPercent < minFillPercent {
-		fillPercent = minFillPercent
-	} else if fillPercent > maxFillPercent {
-		fillPercent = maxFillPercent
-	}
-	threshold := int(float64(pageSize) * fillPercent)
-
-	// Determine split position and sizes of the two pages.
-	splitIndex, _ := n.splitIndex(threshold)
-
-	// Split node into two separate nodes.
-	// If there's no parent then we'll need to create one.
-	if n.parent == nil {
-		n.parent = &node{bucket: n.bucket, children: []*node{n}}
-	}
-
-	// Create a new node and add it to the parent.
-	next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
-	n.parent.children = append(n.parent.children, next)
-
-	// Split inodes across two nodes.
-	next.inodes = n.inodes[splitIndex:]
-	n.inodes = n.inodes[:splitIndex]
-
-	// Update the statistics.
-	n.bucket.tx.stats.Split++
-
-	return n, next
-}
-
-// splitIndex finds the position where a page will fill a given threshold.
-// It returns the index as well as the size of the first page.
-// It is only called from split().
-func (n *node) splitIndex(threshold int) (index, sz int) {
-	sz = pageHeaderSize
-
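	// Editorial note (not from the original source): threshold comes from
	// splitTwo above, e.g. with the default FillPercent of 0.5 and a
	// 4096-byte page, the split threshold is int(4096 * 0.5) == 2048 bytes.

-	// Loop until we only have the minimum number of keys required for the second page.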
- for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { - index = i - inode := n.inodes[i] - elsize := n.pageElementSize() + len(inode.key) + len(inode.value) - - // If we have at least the minimum number of keys and adding another - // node would put us over the threshold then exit and return. - if i >= minKeysPerPage && sz+elsize > threshold { - break - } - - // Add the element size to the total size. - sz += elsize - } - - return -} - -// spill writes the nodes to dirty pages and splits nodes as it goes. -// Returns an error if dirty pages cannot be allocated. -func (n *node) spill() error { - var tx = n.bucket.tx - if n.spilled { - return nil - } - - // Spill child nodes first. Child nodes can materialize sibling nodes in - // the case of split-merge so we cannot use a range loop. We have to check - // the children size on every loop iteration. - sort.Sort(n.children) - for i := 0; i < len(n.children); i++ { - if err := n.children[i].spill(); err != nil { - return err - } - } - - // We no longer need the child list because it's only used for spill tracking. - n.children = nil - - // Split nodes into appropriate sizes. The first node will always be n. - var nodes = n.split(tx.db.pageSize) - for _, node := range nodes { - // Add node's page to the freelist if it's not new. - if node.pgid > 0 { - tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) - node.pgid = 0 - } - - // Allocate contiguous space for the node. - p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) - if err != nil { - return err - } - - // Write the node. - if p.id >= tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) - } - node.pgid = p.id - node.write(p) - node.spilled = true - - // Insert into parent inodes. - if node.parent != nil { - var key = node.key - if key == nil { - key = node.inodes[0].key - } - - node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) - node.key = node.inodes[0].key - _assert(len(node.key) > 0, "spill: zero-length node key") - } - - // Update the statistics. - tx.stats.Spill++ - } - - // If the root node split and created a new root then we need to spill that - // as well. We'll clear out the children to make sure it doesn't try to respill. - if n.parent != nil && n.parent.pgid == 0 { - n.children = nil - return n.parent.spill() - } - - return nil -} - -// rebalance attempts to combine the node with sibling nodes if the node fill -// size is below a threshold or if there are not enough keys. -func (n *node) rebalance() { - if !n.unbalanced { - return - } - n.unbalanced = false - - // Update statistics. - n.bucket.tx.stats.Rebalance++ - - // Ignore if node is above threshold (25%) and has enough keys. - var threshold = n.bucket.tx.db.pageSize / 4 - if n.size() > threshold && len(n.inodes) > n.minKeys() { - return - } - - // Root node has special handling. - if n.parent == nil { - // If root node is a branch and only has one node then collapse it. - if !n.isLeaf && len(n.inodes) == 1 { - // Move root's child up. - child := n.bucket.node(n.inodes[0].pgid, n) - n.isLeaf = child.isLeaf - n.inodes = child.inodes[:] - n.children = child.children - - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent = n - } - } - - // Remove old child. - child.parent = nil - delete(n.bucket.nodes, child.pgid) - child.free() - } - - return - } - - // If node has no keys then just remove it. 
- if n.numChildren() == 0 { - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() - n.parent.rebalance() - return - } - - _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") - - // Destination node is right sibling if idx == 0, otherwise left sibling. - var target *node - var useNextSibling = (n.parent.childIndex(n) == 0) - if useNextSibling { - target = n.nextSibling() - } else { - target = n.prevSibling() - } - - // If target node has extra nodes then just move one over. - if target.numChildren() > target.minKeys() { - if useNextSibling { - // Reparent and move node. - if child, ok := n.bucket.nodes[target.inodes[0].pgid]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } - n.inodes = append(n.inodes, target.inodes[0]) - target.inodes = target.inodes[1:] - - // Update target key on parent. - target.parent.put(target.key, target.inodes[0].key, nil, target.pgid, 0) - target.key = target.inodes[0].key - _assert(len(target.key) > 0, "rebalance(1): zero-length node key") - } else { - // Reparent and move node. - if child, ok := n.bucket.nodes[target.inodes[len(target.inodes)-1].pgid]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } - n.inodes = append(n.inodes, inode{}) - copy(n.inodes[1:], n.inodes) - n.inodes[0] = target.inodes[len(target.inodes)-1] - target.inodes = target.inodes[:len(target.inodes)-1] - } - - // Update parent key for node. - n.parent.put(n.key, n.inodes[0].key, nil, n.pgid, 0) - n.key = n.inodes[0].key - _assert(len(n.key) > 0, "rebalance(2): zero-length node key") - - return - } - - // If both this node and the target node are too small then merge them. - if useNextSibling { - // Reparent all child nodes being moved. - for _, inode := range target.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes from target and remove target. - n.inodes = append(n.inodes, target.inodes...) - n.parent.del(target.key) - n.parent.removeChild(target) - delete(n.bucket.nodes, target.pgid) - target.free() - } else { - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = target - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes to target and remove node. - target.inodes = append(target.inodes, n.inodes...) - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() - } - - // Either this node or the target node was deleted from the parent so rebalance it. - n.parent.rebalance() -} - -// removes a node from the list of in-memory children. -// This does not affect the inodes. -func (n *node) removeChild(target *node) { - for i, child := range n.children { - if child == target { - n.children = append(n.children[:i], n.children[i+1:]...) - return - } - } -} - -// dereference causes the node to copy all its inode key/value references to heap memory. -// This is required when the mmap is reallocated so inodes are not pointing to stale data. 
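// Editorial note (not from the original source): dereference is the internal
// counterpart of the caveat in doc.go: keys and values are only valid for
// the life of their transaction, so callers who need data longer must copy it:
//
//	var out []byte
//	db.View(func(tx *bolt.Tx) error {
//		v := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
//		out = append(out, v...) // copy; v is invalid after View returns
//		return nil
//	})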
-func (n *node) dereference() { - if n.key != nil { - key := make([]byte, len(n.key)) - copy(key, n.key) - n.key = key - _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") - } - - for i := range n.inodes { - inode := &n.inodes[i] - - key := make([]byte, len(inode.key)) - copy(key, inode.key) - inode.key = key - _assert(len(inode.key) > 0, "dereference: zero-length inode key") - - value := make([]byte, len(inode.value)) - copy(value, inode.value) - inode.value = value - } - - // Recursively dereference children. - for _, child := range n.children { - child.dereference() - } - - // Update statistics. - n.bucket.tx.stats.NodeDeref++ -} - -// free adds the node's underlying page to the freelist. -func (n *node) free() { - if n.pgid != 0 { - n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) - n.pgid = 0 - } -} - -// dump writes the contents of the node to STDERR for debugging purposes. -/* -func (n *node) dump() { - // Write node header. - var typ = "branch" - if n.isLeaf { - typ = "leaf" - } - warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) - - // Write out abbreviated version of each item. - for _, item := range n.inodes { - if n.isLeaf { - if item.flags&bucketLeafFlag != 0 { - bucket := (*bucket)(unsafe.Pointer(&item.value[0])) - warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) - } else { - warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) - } - } else { - warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) - } - } - warn("") -} -*/ - -type nodes []*node - -func (s nodes) Len() int { return len(s) } -func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } - -// inode represents an internal node inside of a node. -// It can be used to point to elements in a page or point -// to an element which hasn't been added to a page yet. -type inode struct { - flags uint32 - pgid pgid - key []byte - value []byte -} - -type inodes []inode diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/node_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/node_test.go deleted file mode 100644 index fa5d10f99..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/node_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package bolt - -import ( - "testing" - "unsafe" -) - -// Ensure that a node can insert a key/value. -func TestNode_put(t *testing.T) { - n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{meta: &meta{pgid: 1}}}} - n.put([]byte("baz"), []byte("baz"), []byte("2"), 0, 0) - n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0) - n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0) - n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag) - - if len(n.inodes) != 3 { - t.Fatalf("exp=3; got=%d", len(n.inodes)) - } - if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "1" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "baz" || string(v) != "2" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n.inodes[2].key, n.inodes[2].value; string(k) != "foo" || string(v) != "3" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if n.inodes[2].flags != uint32(leafPageFlag) { - t.Fatalf("not a leaf: %d", n.inodes[2].flags) - } -} - -// Ensure that a node can deserialize from a leaf page. -func TestNode_read_LeafPage(t *testing.T) { - // Create a page. 
- var buf [4096]byte - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = leafPageFlag - page.count = 2 - - // Insert 2 elements at the beginning. sizeof(leafPageElement) == 16 - nodes := (*[3]leafPageElement)(unsafe.Pointer(&page.ptr)) - nodes[0] = leafPageElement{flags: 0, pos: 32, ksize: 3, vsize: 4} // pos = sizeof(leafPageElement) * 2 - nodes[1] = leafPageElement{flags: 0, pos: 23, ksize: 10, vsize: 3} // pos = sizeof(leafPageElement) + 3 + 4 - - // Write data for the nodes at the end. - data := (*[4096]byte)(unsafe.Pointer(&nodes[2])) - copy(data[:], []byte("barfooz")) - copy(data[7:], []byte("helloworldbye")) - - // Deserialize page into a leaf. - n := &node{} - n.read(page) - - // Check that there are two inodes with correct data. - if !n.isLeaf { - t.Fatal("expected leaf") - } - if len(n.inodes) != 2 { - t.Fatalf("exp=2; got=%d", len(n.inodes)) - } - if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "fooz" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "helloworld" || string(v) != "bye" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } -} - -// Ensure that a node can serialize into a leaf page. -func TestNode_write_LeafPage(t *testing.T) { - // Create a node. - n := &node{isLeaf: true, inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} - n.put([]byte("susy"), []byte("susy"), []byte("que"), 0, 0) - n.put([]byte("ricki"), []byte("ricki"), []byte("lake"), 0, 0) - n.put([]byte("john"), []byte("john"), []byte("johnson"), 0, 0) - - // Write it to a page. - var buf [4096]byte - p := (*page)(unsafe.Pointer(&buf[0])) - n.write(p) - - // Read the page back in. - n2 := &node{} - n2.read(p) - - // Check that the two pages are the same. - if len(n2.inodes) != 3 { - t.Fatalf("exp=3; got=%d", len(n2.inodes)) - } - if k, v := n2.inodes[0].key, n2.inodes[0].value; string(k) != "john" || string(v) != "johnson" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n2.inodes[1].key, n2.inodes[1].value; string(k) != "ricki" || string(v) != "lake" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n2.inodes[2].key, n2.inodes[2].value; string(k) != "susy" || string(v) != "que" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } -} - -// Ensure that a node can split into appropriate subgroups. -func TestNode_split(t *testing.T) { - // Create a node. - n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} - n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0) - - // Split between 2 & 3. - n.split(100) - - var parent = n.parent - if len(parent.children) != 2 { - t.Fatalf("exp=2; got=%d", len(parent.children)) - } - if len(parent.children[0].inodes) != 2 { - t.Fatalf("exp=2; got=%d", len(parent.children[0].inodes)) - } - if len(parent.children[1].inodes) != 3 { - t.Fatalf("exp=3; got=%d", len(parent.children[1].inodes)) - } -} - -// Ensure that a page with the minimum number of inodes just returns a single node. -func TestNode_split_MinKeys(t *testing.T) { - // Create a node. 
- n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} - n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) - - // Split. - n.split(20) - if n.parent != nil { - t.Fatalf("expected nil parent") - } -} - -// Ensure that a node that has keys that all fit on a page just returns one leaf. -func TestNode_split_SinglePage(t *testing.T) { - // Create a node. - n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} - n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0) - - // Split. - n.split(4096) - if n.parent != nil { - t.Fatalf("expected nil parent") - } -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/page.go b/Godeps/_workspace/src/github.com/boltdb/bolt/page.go deleted file mode 100644 index 818aa1b15..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/page.go +++ /dev/null @@ -1,172 +0,0 @@ -package bolt - -import ( - "fmt" - "os" - "sort" - "unsafe" -) - -const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) - -const minKeysPerPage = 2 - -const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) -const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) - -const ( - branchPageFlag = 0x01 - leafPageFlag = 0x02 - metaPageFlag = 0x04 - freelistPageFlag = 0x10 -) - -const ( - bucketLeafFlag = 0x01 -) - -type pgid uint64 - -type page struct { - id pgid - flags uint16 - count uint16 - overflow uint32 - ptr uintptr -} - -// typ returns a human readable page type string used for debugging. -func (p *page) typ() string { - if (p.flags & branchPageFlag) != 0 { - return "branch" - } else if (p.flags & leafPageFlag) != 0 { - return "leaf" - } else if (p.flags & metaPageFlag) != 0 { - return "meta" - } else if (p.flags & freelistPageFlag) != 0 { - return "freelist" - } - return fmt.Sprintf("unknown<%02x>", p.flags) -} - -// meta returns a pointer to the metadata section of the page. -func (p *page) meta() *meta { - return (*meta)(unsafe.Pointer(&p.ptr)) -} - -// leafPageElement retrieves the leaf node by index -func (p *page) leafPageElement(index uint16) *leafPageElement { - n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] - return n -} - -// leafPageElements retrieves a list of leaf nodes. -func (p *page) leafPageElements() []leafPageElement { - return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] -} - -// branchPageElement retrieves the branch node by index -func (p *page) branchPageElement(index uint16) *branchPageElement { - return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] -} - -// branchPageElements retrieves a list of branch nodes. -func (p *page) branchPageElements() []branchPageElement { - return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] -} - -// dump writes n bytes of the page to STDERR as hex output. 
-func (p *page) hexdump(n int) { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] - fmt.Fprintf(os.Stderr, "%x\n", buf) -} - -type pages []*page - -func (s pages) Len() int { return len(s) } -func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } - -// branchPageElement represents a node on a branch page. -type branchPageElement struct { - pos uint32 - ksize uint32 - pgid pgid -} - -// key returns a byte slice of the node key. -func (n *branchPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] -} - -// leafPageElement represents a node on a leaf page. -type leafPageElement struct { - flags uint32 - pos uint32 - ksize uint32 - vsize uint32 -} - -// key returns a byte slice of the node key. -func (n *leafPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] -} - -// value returns a byte slice of the node value. -func (n *leafPageElement) value() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize] -} - -// PageInfo represents human readable information about a page. -type PageInfo struct { - ID int - Type string - Count int - OverflowCount int -} - -type pgids []pgid - -func (s pgids) Len() int { return len(s) } -func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pgids) Less(i, j int) bool { return s[i] < s[j] } - -// merge returns the sorted union of a and b. -func (a pgids) merge(b pgids) pgids { - // Return the opposite slice if one is nil. - if len(a) == 0 { - return b - } else if len(b) == 0 { - return a - } - - // Create a list to hold all elements from both lists. - merged := make(pgids, 0, len(a)+len(b)) - - // Assign lead to the slice with a lower starting value, follow to the higher value. - lead, follow := a, b - if b[0] < a[0] { - lead, follow = b, a - } - - // Continue while there are elements in the lead. - for len(lead) > 0 { - // Merge largest prefix of lead that is ahead of follow[0]. - n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) - merged = append(merged, lead[:n]...) - if n >= len(lead) { - break - } - - // Swap lead and follow. - lead, follow = follow, lead[n:] - } - - // Append what's left in follow. - merged = append(merged, follow...) - - return merged -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/page_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/page_test.go deleted file mode 100644 index 59f4a30ed..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/page_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package bolt - -import ( - "reflect" - "sort" - "testing" - "testing/quick" -) - -// Ensure that the page type can be returned in human readable format. 
-func TestPage_typ(t *testing.T) { - if typ := (&page{flags: branchPageFlag}).typ(); typ != "branch" { - t.Fatalf("exp=branch; got=%v", typ) - } - if typ := (&page{flags: leafPageFlag}).typ(); typ != "leaf" { - t.Fatalf("exp=leaf; got=%v", typ) - } - if typ := (&page{flags: metaPageFlag}).typ(); typ != "meta" { - t.Fatalf("exp=meta; got=%v", typ) - } - if typ := (&page{flags: freelistPageFlag}).typ(); typ != "freelist" { - t.Fatalf("exp=freelist; got=%v", typ) - } - if typ := (&page{flags: 20000}).typ(); typ != "unknown<4e20>" { - t.Fatalf("exp=unknown<4e20>; got=%v", typ) - } -} - -// Ensure that the hexdump debugging function doesn't blow up. -func TestPage_dump(t *testing.T) { - (&page{id: 256}).hexdump(16) -} - -func TestPgids_merge(t *testing.T) { - a := pgids{4, 5, 6, 10, 11, 12, 13, 27} - b := pgids{1, 3, 8, 9, 25, 30} - c := a.merge(b) - if !reflect.DeepEqual(c, pgids{1, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30}) { - t.Errorf("mismatch: %v", c) - } - - a = pgids{4, 5, 6, 10, 11, 12, 13, 27, 35, 36} - b = pgids{8, 9, 25, 30} - c = a.merge(b) - if !reflect.DeepEqual(c, pgids{4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30, 35, 36}) { - t.Errorf("mismatch: %v", c) - } -} - -func TestPgids_merge_quick(t *testing.T) { - if err := quick.Check(func(a, b pgids) bool { - // Sort incoming lists. - sort.Sort(a) - sort.Sort(b) - - // Merge the two lists together. - got := a.merge(b) - - // The expected value should be the two lists combined and sorted. - exp := append(a, b...) - sort.Sort(exp) - - if !reflect.DeepEqual(exp, got) { - t.Errorf("\nexp=%+v\ngot=%+v\n", exp, got) - return false - } - - return true - }, nil); err != nil { - t.Fatal(err) - } -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/quick_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/quick_test.go deleted file mode 100644 index 4da581775..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/quick_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package bolt_test - -import ( - "bytes" - "flag" - "fmt" - "math/rand" - "os" - "reflect" - "testing/quick" - "time" -) - -// testing/quick defaults to 5 iterations and a random seed. -// You can override these settings from the command line: -// -// -quick.count The number of iterations to perform. -// -quick.seed The seed to use for randomizing. -// -quick.maxitems The maximum number of items to insert into a DB. -// -quick.maxksize The maximum size of a key. -// -quick.maxvsize The maximum size of a value. 
-//
-
-var qcount, qseed, qmaxitems, qmaxksize, qmaxvsize int
-
-func init() {
-	flag.IntVar(&qcount, "quick.count", 5, "")
-	flag.IntVar(&qseed, "quick.seed", int(time.Now().UnixNano())%100000, "")
-	flag.IntVar(&qmaxitems, "quick.maxitems", 1000, "")
-	flag.IntVar(&qmaxksize, "quick.maxksize", 1024, "")
-	flag.IntVar(&qmaxvsize, "quick.maxvsize", 1024, "")
-	flag.Parse()
-	fmt.Fprintln(os.Stderr, "seed:", qseed)
-	fmt.Fprintf(os.Stderr, "quick settings: count=%v, items=%v, ksize=%v, vsize=%v\n", qcount, qmaxitems, qmaxksize, qmaxvsize)
-}
-
-func qconfig() *quick.Config {
-	return &quick.Config{
-		MaxCount: qcount,
-		Rand:     rand.New(rand.NewSource(int64(qseed))),
-	}
-}
-
-type testdata []testdataitem
-
-func (t testdata) Len() int           { return len(t) }
-func (t testdata) Swap(i, j int)      { t[i], t[j] = t[j], t[i] }
-func (t testdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == -1 }
-
-func (t testdata) Generate(rand *rand.Rand, size int) reflect.Value {
-	n := rand.Intn(qmaxitems-1) + 1
-	items := make(testdata, n)
-	for i := 0; i < n; i++ {
-		item := &items[i]
-		item.Key = randByteSlice(rand, 1, qmaxksize)
-		item.Value = randByteSlice(rand, 0, qmaxvsize)
-	}
-	return reflect.ValueOf(items)
-}
-
-type revtestdata []testdataitem
-
-func (t revtestdata) Len() int           { return len(t) }
-func (t revtestdata) Swap(i, j int)      { t[i], t[j] = t[j], t[i] }
-func (t revtestdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == 1 }
-
-type testdataitem struct {
-	Key   []byte
-	Value []byte
-}
-
-func randByteSlice(rand *rand.Rand, minSize, maxSize int) []byte {
-	n := rand.Intn(maxSize-minSize) + minSize
-	b := make([]byte, n)
-	for i := 0; i < n; i++ {
-		b[i] = byte(rand.Intn(255))
-	}
-	return b
-}
diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/simulation_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/simulation_test.go
deleted file mode 100644
index ceb8baef0..000000000
--- a/Godeps/_workspace/src/github.com/boltdb/bolt/simulation_test.go
+++ /dev/null
@@ -1,327 +0,0 @@
-package bolt_test
-
-import (
-	"bytes"
-	"fmt"
-	"math/rand"
-	"sync"
-	"testing"
-
-	"github.com/boltdb/bolt"
-)
-
-func TestSimulate_1op_1p(t *testing.T)     { testSimulate(t, 1, 1) }
-func TestSimulate_10op_1p(t *testing.T)    { testSimulate(t, 10, 1) }
-func TestSimulate_100op_1p(t *testing.T)   { testSimulate(t, 100, 1) }
-func TestSimulate_1000op_1p(t *testing.T)  { testSimulate(t, 1000, 1) }
-func TestSimulate_10000op_1p(t *testing.T) { testSimulate(t, 10000, 1) }
-
-func TestSimulate_10op_10p(t *testing.T)    { testSimulate(t, 10, 10) }
-func TestSimulate_100op_10p(t *testing.T)   { testSimulate(t, 100, 10) }
-func TestSimulate_1000op_10p(t *testing.T)  { testSimulate(t, 1000, 10) }
-func TestSimulate_10000op_10p(t *testing.T) { testSimulate(t, 10000, 10) }
-
-func TestSimulate_100op_100p(t *testing.T)   { testSimulate(t, 100, 100) }
-func TestSimulate_1000op_100p(t *testing.T)  { testSimulate(t, 1000, 100) }
-func TestSimulate_10000op_100p(t *testing.T) { testSimulate(t, 10000, 100) }
-
-func TestSimulate_10000op_1000p(t *testing.T) { testSimulate(t, 10000, 1000) }
-
-// Randomly generate operations on a given database with multiple clients to ensure consistency and thread safety.
-func testSimulate(t *testing.T, threadCount, parallelism int) {
-	if testing.Short() {
-		t.Skip("skipping test in short mode.")
-	}
-
-	rand.Seed(int64(qseed))
-
-	// A list of operations that readers and writers can perform.
- var readerHandlers = []simulateHandler{simulateGetHandler} - var writerHandlers = []simulateHandler{simulateGetHandler, simulatePutHandler} - - var versions = make(map[int]*QuickDB) - versions[1] = NewQuickDB() - - db := NewTestDB() - defer db.Close() - - var mutex sync.Mutex - - // Run n threads in parallel, each with their own operation. - var wg sync.WaitGroup - var threads = make(chan bool, parallelism) - var i int - for { - threads <- true - wg.Add(1) - writable := ((rand.Int() % 100) < 20) // 20% writers - - // Choose an operation to execute. - var handler simulateHandler - if writable { - handler = writerHandlers[rand.Intn(len(writerHandlers))] - } else { - handler = readerHandlers[rand.Intn(len(readerHandlers))] - } - - // Execute a thread for the given operation. - go func(writable bool, handler simulateHandler) { - defer wg.Done() - - // Start transaction. - tx, err := db.Begin(writable) - if err != nil { - t.Fatal("tx begin: ", err) - } - - // Obtain current state of the dataset. - mutex.Lock() - var qdb = versions[tx.ID()] - if writable { - qdb = versions[tx.ID()-1].Copy() - } - mutex.Unlock() - - // Make sure we commit/rollback the tx at the end and update the state. - if writable { - defer func() { - mutex.Lock() - versions[tx.ID()] = qdb - mutex.Unlock() - - ok(t, tx.Commit()) - }() - } else { - defer tx.Rollback() - } - - // Ignore operation if we don't have data yet. - if qdb == nil { - return - } - - // Execute handler. - handler(tx, qdb) - - // Release a thread back to the scheduling loop. - <-threads - }(writable, handler) - - i++ - if i > threadCount { - break - } - } - - // Wait until all threads are done. - wg.Wait() -} - -type simulateHandler func(tx *bolt.Tx, qdb *QuickDB) - -// Retrieves a key from the database and verifies that it is what is expected. -func simulateGetHandler(tx *bolt.Tx, qdb *QuickDB) { - // Randomly retrieve an existing exist. - keys := qdb.Rand() - if len(keys) == 0 { - return - } - - // Retrieve root bucket. - b := tx.Bucket(keys[0]) - if b == nil { - panic(fmt.Sprintf("bucket[0] expected: %08x\n", trunc(keys[0], 4))) - } - - // Drill into nested buckets. - for _, key := range keys[1 : len(keys)-1] { - b = b.Bucket(key) - if b == nil { - panic(fmt.Sprintf("bucket[n] expected: %v -> %v\n", keys, key)) - } - } - - // Verify key/value on the final bucket. - expected := qdb.Get(keys) - actual := b.Get(keys[len(keys)-1]) - if !bytes.Equal(actual, expected) { - fmt.Println("=== EXPECTED ===") - fmt.Println(expected) - fmt.Println("=== ACTUAL ===") - fmt.Println(actual) - fmt.Println("=== END ===") - panic("value mismatch") - } -} - -// Inserts a key into the database. -func simulatePutHandler(tx *bolt.Tx, qdb *QuickDB) { - var err error - keys, value := randKeys(), randValue() - - // Retrieve root bucket. - b := tx.Bucket(keys[0]) - if b == nil { - b, err = tx.CreateBucket(keys[0]) - if err != nil { - panic("create bucket: " + err.Error()) - } - } - - // Create nested buckets, if necessary. - for _, key := range keys[1 : len(keys)-1] { - child := b.Bucket(key) - if child != nil { - b = child - } else { - b, err = b.CreateBucket(key) - if err != nil { - panic("create bucket: " + err.Error()) - } - } - } - - // Insert into database. - if err := b.Put(keys[len(keys)-1], value); err != nil { - panic("put: " + err.Error()) - } - - // Insert into in-memory database. - qdb.Put(keys, value) -} - -// QuickDB is an in-memory database that replicates the functionality of the -// Bolt DB type except that it is entirely in-memory. 
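The two handlers above show the shape of the whole simulation: writes go to both the real database and an in-memory model, and reads are cross-checked against that model. A minimal, self-contained sketch of the same oracle pattern, with illustrative names only (this is not the vendored code):

```go
package main

import (
	"bytes"
	"fmt"
	"math/rand"
)

// kv is a stand-in for the system under test (the real database).
type kv struct{ m map[string][]byte }

func (s *kv) Put(k, v []byte) { s.m[string(k)] = v }
func (s *kv) Get(k []byte) []byte { return s.m[string(k)] }

func main() {
	sut := &kv{m: map[string][]byte{}} // system under test
	oracle := map[string][]byte{}      // trusted in-memory model

	for i := 0; i < 1000; i++ {
		k := []byte(fmt.Sprintf("key-%d", rand.Intn(50)))
		if rand.Intn(100) < 20 { // ~20% writers, as in testSimulate
			v := []byte(fmt.Sprintf("val-%d", i))
			sut.Put(k, v)
			oracle[string(k)] = v
		} else if !bytes.Equal(sut.Get(k), oracle[string(k)]) {
			panic("store and oracle diverged")
		}
	}
	fmt.Println("store matched oracle for 1000 random ops")
}
```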
It is meant for testing -// that the Bolt database is consistent. -type QuickDB struct { - sync.RWMutex - m map[string]interface{} -} - -// NewQuickDB returns an instance of QuickDB. -func NewQuickDB() *QuickDB { - return &QuickDB{m: make(map[string]interface{})} -} - -// Get retrieves the value at a key path. -func (db *QuickDB) Get(keys [][]byte) []byte { - db.RLock() - defer db.RUnlock() - - m := db.m - for _, key := range keys[:len(keys)-1] { - value := m[string(key)] - if value == nil { - return nil - } - switch value := value.(type) { - case map[string]interface{}: - m = value - case []byte: - return nil - } - } - - // Only return if it's a simple value. - if value, ok := m[string(keys[len(keys)-1])].([]byte); ok { - return value - } - return nil -} - -// Put inserts a value into a key path. -func (db *QuickDB) Put(keys [][]byte, value []byte) { - db.Lock() - defer db.Unlock() - - // Build buckets all the way down the key path. - m := db.m - for _, key := range keys[:len(keys)-1] { - if _, ok := m[string(key)].([]byte); ok { - return // Keypath intersects with a simple value. Do nothing. - } - - if m[string(key)] == nil { - m[string(key)] = make(map[string]interface{}) - } - m = m[string(key)].(map[string]interface{}) - } - - // Insert value into the last key. - m[string(keys[len(keys)-1])] = value -} - -// Rand returns a random key path that points to a simple value. -func (db *QuickDB) Rand() [][]byte { - db.RLock() - defer db.RUnlock() - if len(db.m) == 0 { - return nil - } - var keys [][]byte - db.rand(db.m, &keys) - return keys -} - -func (db *QuickDB) rand(m map[string]interface{}, keys *[][]byte) { - i, index := 0, rand.Intn(len(m)) - for k, v := range m { - if i == index { - *keys = append(*keys, []byte(k)) - if v, ok := v.(map[string]interface{}); ok { - db.rand(v, keys) - } - return - } - i++ - } - panic("quickdb rand: out-of-range") -} - -// Copy copies the entire database. -func (db *QuickDB) Copy() *QuickDB { - db.RLock() - defer db.RUnlock() - return &QuickDB{m: db.copy(db.m)} -} - -func (db *QuickDB) copy(m map[string]interface{}) map[string]interface{} { - clone := make(map[string]interface{}, len(m)) - for k, v := range m { - switch v := v.(type) { - case map[string]interface{}: - clone[k] = db.copy(v) - default: - clone[k] = v - } - } - return clone -} - -func randKey() []byte { - var min, max = 1, 1024 - n := rand.Intn(max-min) + min - b := make([]byte, n) - for i := 0; i < n; i++ { - b[i] = byte(rand.Intn(255)) - } - return b -} - -func randKeys() [][]byte { - var keys [][]byte - var count = rand.Intn(2) + 2 - for i := 0; i < count; i++ { - keys = append(keys, randKey()) - } - return keys -} - -func randValue() []byte { - n := rand.Intn(8192) - b := make([]byte, n) - for i := 0; i < n; i++ { - b[i] = byte(rand.Intn(255)) - } - return b -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go b/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go deleted file mode 100644 index 6b52b2c89..000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go +++ /dev/null @@ -1,611 +0,0 @@ -package bolt - -import ( - "fmt" - "io" - "os" - "sort" - "time" - "unsafe" -) - -// txid represents the internal transaction identifier. -type txid uint64 - -// Tx represents a read-only or read/write transaction on the database. -// Read-only transactions can be used for retrieving values for keys and creating cursors. -// Read/write transactions can create and remove buckets and create and remove keys. 
-// -// IMPORTANT: You must commit or rollback transactions when you are done with -// them. Pages can not be reclaimed by the writer until no more transactions -// are using them. A long running read transaction can cause the database to -// quickly grow. -type Tx struct { - writable bool - managed bool - db *DB - meta *meta - root Bucket - pages map[pgid]*page - stats TxStats - commitHandlers []func() -} - -// init initializes the transaction. -func (tx *Tx) init(db *DB) { - tx.db = db - tx.pages = nil - - // Copy the meta page since it can be changed by the writer. - tx.meta = &meta{} - db.meta().copy(tx.meta) - - // Copy over the root bucket. - tx.root = newBucket(tx) - tx.root.bucket = &bucket{} - *tx.root.bucket = tx.meta.root - - // Increment the transaction id and add a page cache for writable transactions. - if tx.writable { - tx.pages = make(map[pgid]*page) - tx.meta.txid += txid(1) - } -} - -// ID returns the transaction id. -func (tx *Tx) ID() int { - return int(tx.meta.txid) -} - -// DB returns a reference to the database that created the transaction. -func (tx *Tx) DB() *DB { - return tx.db -} - -// Size returns current database size in bytes as seen by this transaction. -func (tx *Tx) Size() int64 { - return int64(tx.meta.pgid) * int64(tx.db.pageSize) -} - -// Writable returns whether the transaction can perform write operations. -func (tx *Tx) Writable() bool { - return tx.writable -} - -// Cursor creates a cursor associated with the root bucket. -// All items in the cursor will return a nil value because all root bucket keys point to buckets. -// The cursor is only valid as long as the transaction is open. -// Do not use a cursor after the transaction is closed. -func (tx *Tx) Cursor() *Cursor { - return tx.root.Cursor() -} - -// Stats retrieves a copy of the current transaction statistics. -func (tx *Tx) Stats() TxStats { - return tx.stats -} - -// Bucket retrieves a bucket by name. -// Returns nil if the bucket does not exist. -func (tx *Tx) Bucket(name []byte) *Bucket { - return tx.root.Bucket(name) -} - -// CreateBucket creates a new bucket. -// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. -func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { - return tx.root.CreateBucket(name) -} - -// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. -// Returns an error if the bucket name is blank, or if the bucket name is too long. -func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { - return tx.root.CreateBucketIfNotExists(name) -} - -// DeleteBucket deletes a bucket. -// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. -func (tx *Tx) DeleteBucket(name []byte) error { - return tx.root.DeleteBucket(name) -} - -// ForEach executes a function for each bucket in the root. -// If the provided function returns an error then the iteration is stopped and -// the error is returned to the caller. -func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { - return tx.root.ForEach(func(k, v []byte) error { - if err := fn(k, tx.root.Bucket(k)); err != nil { - return err - } - return nil - }) -} - -// OnCommit adds a handler function to be executed after the transaction successfully commits. -func (tx *Tx) OnCommit(fn func()) { - tx.commitHandlers = append(tx.commitHandlers, fn) -} - -// Commit writes all changes to disk and updates the meta page. 
-// Returns an error if a disk write error occurs, or if Commit is
-// called on a read-only transaction.
-func (tx *Tx) Commit() error {
-	_assert(!tx.managed, "managed tx commit not allowed")
-	if tx.db == nil {
-		return ErrTxClosed
-	} else if !tx.writable {
-		return ErrTxNotWritable
-	}
-
-	// TODO(benbjohnson): Use vectorized I/O to write out dirty pages.
-
-	// Rebalance nodes which have had deletions.
-	var startTime = time.Now()
-	tx.root.rebalance()
-	if tx.stats.Rebalance > 0 {
-		tx.stats.RebalanceTime += time.Since(startTime)
-	}
-
-	// Spill data onto dirty pages.
-	startTime = time.Now()
-	if err := tx.root.spill(); err != nil {
-		tx.rollback()
-		return err
-	}
-	tx.stats.SpillTime += time.Since(startTime)
-
-	// Free the old root bucket.
-	tx.meta.root.root = tx.root.root
-
-	// Free the freelist and allocate new pages for it. This will overestimate
-	// the size of the freelist but not underestimate the size (which would be bad).
-	tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
-	p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
-	if err != nil {
-		tx.rollback()
-		return err
-	}
-	if err := tx.db.freelist.write(p); err != nil {
-		tx.rollback()
-		return err
-	}
-	tx.meta.freelist = p.id
-
-	// Write dirty pages to disk.
-	startTime = time.Now()
-	if err := tx.write(); err != nil {
-		tx.rollback()
-		return err
-	}
-
-	// If strict mode is enabled then perform a consistency check.
-	// Only the first consistency error is reported in the panic.
-	if tx.db.StrictMode {
-		if err, ok := <-tx.Check(); ok {
-			panic("check fail: " + err.Error())
-		}
-	}
-
-	// Write meta to disk.
-	if err := tx.writeMeta(); err != nil {
-		tx.rollback()
-		return err
-	}
-	tx.stats.WriteTime += time.Since(startTime)
-
-	// Finalize the transaction.
-	tx.close()
-
-	// Execute commit handlers now that the locks have been removed.
-	for _, fn := range tx.commitHandlers {
-		fn()
-	}
-
-	return nil
-}
-
-// Rollback closes the transaction and ignores all previous updates. Read-only
-// transactions must be rolled back and not committed.
-func (tx *Tx) Rollback() error {
-	_assert(!tx.managed, "managed tx rollback not allowed")
-	if tx.db == nil {
-		return ErrTxClosed
-	}
-	tx.rollback()
-	return nil
-}
-
-func (tx *Tx) rollback() {
-	if tx.db == nil {
-		return
-	}
-	if tx.writable {
-		tx.db.freelist.rollback(tx.meta.txid)
-		tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
-	}
-	tx.close()
-}
-
-func (tx *Tx) close() {
-	if tx.db == nil {
-		return
-	}
-	if tx.writable {
-		// Grab freelist stats.
-		var freelistFreeN = tx.db.freelist.free_count()
-		var freelistPendingN = tx.db.freelist.pending_count()
-		var freelistAlloc = tx.db.freelist.size()
-
-		// Remove writer lock.
-		tx.db.rwlock.Unlock()
-
-		// Merge statistics.
-		tx.db.statlock.Lock()
-		tx.db.stats.FreePageN = freelistFreeN
-		tx.db.stats.PendingPageN = freelistPendingN
-		tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
-		tx.db.stats.FreelistInuse = freelistAlloc
-		tx.db.stats.TxStats.add(&tx.stats)
-		tx.db.statlock.Unlock()
-	} else {
-		tx.db.removeTx(tx)
-	}
-	tx.db = nil
-}
-
-// Copy writes the entire database to a writer.
-// This function exists for backwards compatibility. Use WriteTo() instead.
-func (tx *Tx) Copy(w io.Writer) error {
-	_, err := tx.WriteTo(w)
-	return err
-}
-
-// WriteTo writes the entire database to a writer.
-// If err == nil then exactly tx.Size() bytes will be written into the writer.
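Commit and Rollback above are the only two exits from a transaction, which is why the Tx doc comment calls them mandatory. A minimal sketch of the manual-transaction discipline they imply, assuming an open *bolt.DB (db.Update wraps essentially this pattern; putKey is an illustrative name):

```go
package example

import "github.com/boltdb/bolt"

// putKey rolls back on every error path and commits last.
func putKey(db *bolt.DB, bucket, k, v []byte) error {
	tx, err := db.Begin(true) // writable transaction
	if err != nil {
		return err
	}
	// After a successful Commit the deferred Rollback simply returns
	// ErrTxClosed, which the defer discards; on error paths it releases
	// the pending pages and the writer lock.
	defer tx.Rollback()

	b, err := tx.CreateBucketIfNotExists(bucket)
	if err != nil {
		return err
	}
	if err := b.Put(k, v); err != nil {
		return err
	}
	return tx.Commit()
}
```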
-func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { - // Attempt to open reader directly. - var f *os.File - if f, err = os.OpenFile(tx.db.path, os.O_RDONLY|odirect, 0); err != nil { - // Fallback to a regular open if that doesn't work. - if f, err = os.OpenFile(tx.db.path, os.O_RDONLY, 0); err != nil { - return 0, err - } - } - - // Copy the meta pages. - tx.db.metalock.Lock() - n, err = io.CopyN(w, f, int64(tx.db.pageSize*2)) - tx.db.metalock.Unlock() - if err != nil { - _ = f.Close() - return n, fmt.Errorf("meta copy: %s", err) - } - - // Copy data pages. - wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) - n += wn - if err != nil { - _ = f.Close() - return n, err - } - - return n, f.Close() -} - -// CopyFile copies the entire database to file at the given path. -// A reader transaction is maintained during the copy so it is safe to continue -// using the database while a copy is in progress. -func (tx *Tx) CopyFile(path string, mode os.FileMode) error { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) - if err != nil { - return err - } - - err = tx.Copy(f) - if err != nil { - _ = f.Close() - return err - } - return f.Close() -} - -// Check performs several consistency checks on the database for this transaction. -// An error is returned if any inconsistency is found. -// -// It can be safely run concurrently on a writable transaction. However, this -// incurs a high cost for large databases and databases with a lot of subbuckets -// because of caching. This overhead can be removed if running on a read-only -// transaction, however, it is not safe to execute other writer transactions at -// the same time. -func (tx *Tx) Check() <-chan error { - ch := make(chan error) - go tx.check(ch) - return ch -} - -func (tx *Tx) check(ch chan error) { - // Check if any pages are double freed. - freed := make(map[pgid]bool) - for _, id := range tx.db.freelist.all() { - if freed[id] { - ch <- fmt.Errorf("page %d: already freed", id) - } - freed[id] = true - } - - // Track every reachable page. - reachable := make(map[pgid]*page) - reachable[0] = tx.page(0) // meta0 - reachable[1] = tx.page(1) // meta1 - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) - } - - // Recursively check buckets. - tx.checkBucket(&tx.root, reachable, freed, ch) - - // Ensure all pages below high water mark are either reachable or freed. - for i := pgid(0); i < tx.meta.pgid; i++ { - _, isReachable := reachable[i] - if !isReachable && !freed[i] { - ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) - } - } - - // Close the channel to signal completion. - close(ch) -} - -func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { - // Ignore inline buckets. - if b.root == 0 { - return - } - - // Check every page used by this bucket. - b.tx.forEachPage(b.root, 0, func(p *page, _ int) { - if p.id > tx.meta.pgid { - ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) - } - - // Ensure each page is only referenced once. - for i := pgid(0); i <= pgid(p.overflow); i++ { - var id = p.id + i - if _, ok := reachable[id]; ok { - ch <- fmt.Errorf("page %d: multiple references", int(id)) - } - reachable[id] = p - } - - // We should only encounter un-freed leaf and branch pages. 
-		if freed[p.id] {
-			ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
-		} else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
-			ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
-		}
-	})
-
-	// Check each bucket within this bucket.
-	_ = b.ForEach(func(k, v []byte) error {
-		if child := b.Bucket(k); child != nil {
-			tx.checkBucket(child, reachable, freed, ch)
-		}
-		return nil
-	})
-}
-
-// allocate returns a contiguous block of memory starting at a given page.
-func (tx *Tx) allocate(count int) (*page, error) {
-	p, err := tx.db.allocate(count)
-	if err != nil {
-		return nil, err
-	}
-
-	// Save to our page cache.
-	tx.pages[p.id] = p
-
-	// Update statistics.
-	tx.stats.PageCount++
-	tx.stats.PageAlloc += count * tx.db.pageSize
-
-	return p, nil
-}
-
-// write writes any dirty pages to disk.
-func (tx *Tx) write() error {
-	// Sort pages by id.
-	pages := make(pages, 0, len(tx.pages))
-	for _, p := range tx.pages {
-		pages = append(pages, p)
-	}
-	sort.Sort(pages)
-
-	// Write pages to disk in order.
-	for _, p := range pages {
-		size := (int(p.overflow) + 1) * tx.db.pageSize
-		offset := int64(p.id) * int64(tx.db.pageSize)
-
-		// Write out page in "max allocation" sized chunks.
-		ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
-		for {
-			// Limit our write to our max allocation size.
-			sz := size
-			if sz > maxAllocSize-1 {
-				sz = maxAllocSize - 1
-			}
-
-			// Write chunk to disk.
-			buf := ptr[:sz]
-			if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
-				return err
-			}
-
-			// Update statistics.
-			tx.stats.Write++
-
-			// Exit inner for loop if we've written all the chunks.
-			size -= sz
-			if size == 0 {
-				break
-			}
-
-			// Otherwise move offset forward and move pointer to next chunk.
-			offset += int64(sz)
-			ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
-		}
-	}
-
-	// Ignore file sync if flag is set on DB.
-	if !tx.db.NoSync || IgnoreNoSync {
-		if err := fdatasync(tx.db); err != nil {
-			return err
-		}
-	}
-
-	// Clear out page cache.
-	tx.pages = make(map[pgid]*page)
-
-	return nil
-}
-
-// writeMeta writes the meta to the disk.
-func (tx *Tx) writeMeta() error {
-	// Create a temporary buffer for the meta page.
-	buf := make([]byte, tx.db.pageSize)
-	p := tx.db.pageInBuffer(buf, 0)
-	tx.meta.write(p)
-
-	// Write the meta page to file.
-	if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
-		return err
-	}
-	if !tx.db.NoSync || IgnoreNoSync {
-		if err := fdatasync(tx.db); err != nil {
-			return err
-		}
-	}
-
-	// Update statistics.
-	tx.stats.Write++
-
-	return nil
-}
-
-// page returns a reference to the page with a given id.
-// If the page has been written to then a temporary buffered page is returned.
-func (tx *Tx) page(id pgid) *page {
-	// Check the dirty pages first.
-	if tx.pages != nil {
-		if p, ok := tx.pages[id]; ok {
-			return p
-		}
-	}
-
-	// Otherwise return directly from the mmap.
-	return tx.db.page(id)
-}
-
-// forEachPage iterates over every page within a given page and executes a function.
-func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
-	p := tx.page(pgid)
-
-	// Execute function.
-	fn(p, depth)
-
-	// Recursively loop over children.
-	if (p.flags & branchPageFlag) != 0 {
-		for i := 0; i < int(p.count); i++ {
-			elem := p.branchPageElement(uint16(i))
-			tx.forEachPage(elem.pgid, depth+1, fn)
-		}
-	}
-}
-
-// Page returns page information for a given page number.
-// This is only safe for concurrent use when used by a writable transaction.
-func (tx *Tx) Page(id int) (*PageInfo, error) {
-	if tx.db == nil {
-		return nil, ErrTxClosed
-	} else if pgid(id) >= tx.meta.pgid {
-		return nil, nil
-	}
-
-	// Build the page info.
-	p := tx.db.page(pgid(id))
-	info := &PageInfo{
-		ID:            id,
-		Count:         int(p.count),
-		OverflowCount: int(p.overflow),
-	}
-
-	// Determine the type (or if it's free).
-	if tx.db.freelist.freed(pgid(id)) {
-		info.Type = "free"
-	} else {
-		info.Type = p.typ()
-	}
-
-	return info, nil
-}
-
-// TxStats represents statistics about the actions performed by the transaction.
-type TxStats struct {
-	// Page statistics.
-	PageCount int // number of page allocations
-	PageAlloc int // total bytes allocated
-
-	// Cursor statistics.
-	CursorCount int // number of cursors created
-
-	// Node statistics.
-	NodeCount int // number of node allocations
-	NodeDeref int // number of node dereferences
-
-	// Rebalance statistics.
-	Rebalance     int           // number of node rebalances
-	RebalanceTime time.Duration // total time spent rebalancing
-
-	// Split/Spill statistics.
-	Split     int           // number of nodes split
-	Spill     int           // number of nodes spilled
-	SpillTime time.Duration // total time spent spilling
-
-	// Write statistics.
-	Write     int           // number of writes performed
-	WriteTime time.Duration // total time spent writing to disk
-}
-
-func (s *TxStats) add(other *TxStats) {
-	s.PageCount += other.PageCount
-	s.PageAlloc += other.PageAlloc
-	s.CursorCount += other.CursorCount
-	s.NodeCount += other.NodeCount
-	s.NodeDeref += other.NodeDeref
-	s.Rebalance += other.Rebalance
-	s.RebalanceTime += other.RebalanceTime
-	s.Split += other.Split
-	s.Spill += other.Spill
-	s.SpillTime += other.SpillTime
-	s.Write += other.Write
-	s.WriteTime += other.WriteTime
-}
-
-// Sub calculates and returns the difference between two sets of transaction stats.
-// This is useful when obtaining stats at two different points in time and
-// you need the performance counters that occurred within that time span.
-func (s *TxStats) Sub(other *TxStats) TxStats {
-	var diff TxStats
-	diff.PageCount = s.PageCount - other.PageCount
-	diff.PageAlloc = s.PageAlloc - other.PageAlloc
-	diff.CursorCount = s.CursorCount - other.CursorCount
-	diff.NodeCount = s.NodeCount - other.NodeCount
-	diff.NodeDeref = s.NodeDeref - other.NodeDeref
-	diff.Rebalance = s.Rebalance - other.Rebalance
-	diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
-	diff.Split = s.Split - other.Split
-	diff.Spill = s.Spill - other.Spill
-	diff.SpillTime = s.SpillTime - other.SpillTime
-	diff.Write = s.Write - other.Write
-	diff.WriteTime = s.WriteTime - other.WriteTime
-	return diff
-}
diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/tx_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/tx_test.go
deleted file mode 100644
index 6c8271a60..000000000
--- a/Godeps/_workspace/src/github.com/boltdb/bolt/tx_test.go
+++ /dev/null
@@ -1,456 +0,0 @@
-package bolt_test
-
-import (
-	"errors"
-	"fmt"
-	"os"
-	"testing"
-
-	"github.com/boltdb/bolt"
-)
-
-// Ensure that committing a closed transaction returns an error.
-func TestTx_Commit_Closed(t *testing.T) {
-	db := NewTestDB()
-	defer db.Close()
-	tx, _ := db.Begin(true)
-	tx.CreateBucket([]byte("foo"))
-	ok(t, tx.Commit())
-	equals(t, tx.Commit(), bolt.ErrTxClosed)
-}
-
-// Ensure that rolling back a closed transaction returns an error.
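Sub is what makes these counters usable for monitoring: sample the stats at two points in time and diff them. A hedged sketch of periodic delta reporting, assuming bolt's public DB.Stats() accessor (whose Stats value embeds a TxStats field) and an illustrative reportTxStats helper:

```go
package example

import (
	"log"
	"time"

	"github.com/boltdb/bolt"
)

// reportTxStats logs the transaction counters accumulated in each window.
func reportTxStats(db *bolt.DB, every time.Duration) {
	prev := db.Stats().TxStats
	for range time.Tick(every) {
		cur := db.Stats().TxStats
		diff := cur.Sub(&prev) // counters for this window only
		log.Printf("writes=%d writeTime=%v spills=%d", diff.Write, diff.WriteTime, diff.Spill)
		prev = cur
	}
}
```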
-func TestTx_Rollback_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - ok(t, tx.Rollback()) - equals(t, tx.Rollback(), bolt.ErrTxClosed) -} - -// Ensure that committing a read-only transaction returns an error. -func TestTx_Commit_ReadOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(false) - equals(t, tx.Commit(), bolt.ErrTxNotWritable) -} - -// Ensure that a transaction can retrieve a cursor on the root bucket. -func TestTx_Cursor(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.CreateBucket([]byte("woojits")) - c := tx.Cursor() - - k, v := c.First() - equals(t, "widgets", string(k)) - assert(t, v == nil, "") - - k, v = c.Next() - equals(t, "woojits", string(k)) - assert(t, v == nil, "") - - k, v = c.Next() - assert(t, k == nil, "") - assert(t, v == nil, "") - - return nil - }) -} - -// Ensure that creating a bucket with a read-only transaction returns an error. -func TestTx_CreateBucket_ReadOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.View(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("foo")) - assert(t, b == nil, "") - equals(t, bolt.ErrTxNotWritable, err) - return nil - }) -} - -// Ensure that creating a bucket on a closed transaction returns an error. -func TestTx_CreateBucket_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - tx.Commit() - b, err := tx.CreateBucket([]byte("foo")) - assert(t, b == nil, "") - equals(t, bolt.ErrTxClosed, err) -} - -// Ensure that a Tx can retrieve a bucket. -func TestTx_Bucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - assert(t, b != nil, "") - return nil - }) -} - -// Ensure that a Tx retrieving a non-existent key returns nil. -func TestTx_Get_Missing(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - value := tx.Bucket([]byte("widgets")).Get([]byte("no_such_key")) - assert(t, value == nil, "") - return nil - }) -} - -// Ensure that a bucket can be created and retrieved. -func TestTx_CreateBucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - - // Create a bucket. - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - assert(t, b != nil, "") - ok(t, err) - return nil - }) - - // Read the bucket through a separate transaction. - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - assert(t, b != nil, "") - return nil - }) -} - -// Ensure that a bucket can be created if it doesn't already exist. -func TestTx_CreateBucketIfNotExists(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("widgets")) - assert(t, b != nil, "") - ok(t, err) - - b, err = tx.CreateBucketIfNotExists([]byte("widgets")) - assert(t, b != nil, "") - ok(t, err) - - b, err = tx.CreateBucketIfNotExists([]byte{}) - assert(t, b == nil, "") - equals(t, bolt.ErrBucketNameRequired, err) - - b, err = tx.CreateBucketIfNotExists(nil) - assert(t, b == nil, "") - equals(t, bolt.ErrBucketNameRequired, err) - return nil - }) - - // Read the bucket through a separate transaction. 
- db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - assert(t, b != nil, "") - return nil - }) -} - -// Ensure that a bucket cannot be created twice. -func TestTx_CreateBucket_Exists(t *testing.T) { - db := NewTestDB() - defer db.Close() - // Create a bucket. - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - assert(t, b != nil, "") - ok(t, err) - return nil - }) - - // Create the same bucket again. - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - assert(t, b == nil, "") - equals(t, bolt.ErrBucketExists, err) - return nil - }) -} - -// Ensure that a bucket is created with a non-blank name. -func TestTx_CreateBucket_NameRequired(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket(nil) - assert(t, b == nil, "") - equals(t, bolt.ErrBucketNameRequired, err) - return nil - }) -} - -// Ensure that a bucket can be deleted. -func TestTx_DeleteBucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - - // Create a bucket and add a value. - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - return nil - }) - - // Delete the bucket and make sure we can't get the value. - db.Update(func(tx *bolt.Tx) error { - ok(t, tx.DeleteBucket([]byte("widgets"))) - assert(t, tx.Bucket([]byte("widgets")) == nil, "") - return nil - }) - - db.Update(func(tx *bolt.Tx) error { - // Create the bucket again and make sure there's not a phantom value. - b, err := tx.CreateBucket([]byte("widgets")) - assert(t, b != nil, "") - ok(t, err) - assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "") - return nil - }) -} - -// Ensure that deleting a bucket on a closed transaction returns an error. -func TestTx_DeleteBucket_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - tx.Commit() - equals(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxClosed) -} - -// Ensure that deleting a bucket with a read-only transaction returns an error. -func TestTx_DeleteBucket_ReadOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.View(func(tx *bolt.Tx) error { - equals(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxNotWritable) - return nil - }) -} - -// Ensure that nothing happens when deleting a bucket that doesn't exist. -func TestTx_DeleteBucket_NotFound(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - equals(t, bolt.ErrBucketNotFound, tx.DeleteBucket([]byte("widgets"))) - return nil - }) -} - -// Ensure that no error is returned when a tx.ForEach function does not return -// an error. -func TestTx_ForEach_NoError(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - - equals(t, nil, tx.ForEach(func(name []byte, b *bolt.Bucket) error { - return nil - })) - return nil - }) -} - -// Ensure that an error is returned when a tx.ForEach function returns an error. 
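The ForEach tests here exercise the root-bucket iteration API from tx.go. For reference, a short sketch of the typical call pattern, assuming an open *bolt.DB and an illustrative listBuckets helper:

```go
package example

import (
	"fmt"

	"github.com/boltdb/bolt"
)

// listBuckets prints the name and key count of every root bucket.
func listBuckets(db *bolt.DB) error {
	return db.View(func(tx *bolt.Tx) error {
		return tx.ForEach(func(name []byte, b *bolt.Bucket) error {
			n := 0
			if err := b.ForEach(func(k, v []byte) error { n++; return nil }); err != nil {
				return err
			}
			fmt.Printf("%s: %d keys\n", name, n)
			return nil
		})
	})
}
```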
-func TestTx_ForEach_WithError(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - - err := errors.New("foo") - equals(t, err, tx.ForEach(func(name []byte, b *bolt.Bucket) error { - return err - })) - return nil - }) -} - -// Ensure that Tx commit handlers are called after a transaction successfully commits. -func TestTx_OnCommit(t *testing.T) { - var x int - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.OnCommit(func() { x += 1 }) - tx.OnCommit(func() { x += 2 }) - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - equals(t, 3, x) -} - -// Ensure that Tx commit handlers are NOT called after a transaction rolls back. -func TestTx_OnCommit_Rollback(t *testing.T) { - var x int - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.OnCommit(func() { x += 1 }) - tx.OnCommit(func() { x += 2 }) - tx.CreateBucket([]byte("widgets")) - return errors.New("rollback this commit") - }) - equals(t, 0, x) -} - -// Ensure that the database can be copied to a file path. -func TestTx_CopyFile(t *testing.T) { - db := NewTestDB() - defer db.Close() - var dest = tempfile() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) - return nil - }) - - ok(t, db.View(func(tx *bolt.Tx) error { return tx.CopyFile(dest, 0600) })) - - db2, err := bolt.Open(dest, 0600, nil) - ok(t, err) - defer db2.Close() - - db2.View(func(tx *bolt.Tx) error { - equals(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo"))) - equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz"))) - return nil - }) -} - -type failWriterError struct{} - -func (failWriterError) Error() string { - return "error injected for tests" -} - -type failWriter struct { - // fail after this many bytes - After int -} - -func (f *failWriter) Write(p []byte) (n int, err error) { - n = len(p) - if n > f.After { - n = f.After - err = failWriterError{} - } - f.After -= n - return n, err -} - -// Ensure that Copy handles write errors right. -func TestTx_CopyFile_Error_Meta(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) - return nil - }) - - err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{}) }) - equals(t, err.Error(), "meta copy: error injected for tests") -} - -// Ensure that Copy handles write errors right. -func TestTx_CopyFile_Error_Normal(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) - return nil - }) - - err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{3 * db.Info().PageSize}) }) - equals(t, err.Error(), "error injected for tests") -} - -func ExampleTx_Rollback() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Create a bucket. 
- db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - - // Set a value for a key. - db.Update(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - }) - - // Update the key but rollback the transaction so it never saves. - tx, _ := db.Begin(true) - b := tx.Bucket([]byte("widgets")) - b.Put([]byte("foo"), []byte("baz")) - tx.Rollback() - - // Ensure that our original value is still set. - db.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - fmt.Printf("The value for 'foo' is still: %s\n", value) - return nil - }) - - // Output: - // The value for 'foo' is still: bar -} - -func ExampleTx_CopyFile() { - // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Create a bucket and a key. - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - return nil - }) - - // Copy the database to another file. - toFile := tempfile() - db.View(func(tx *bolt.Tx) error { return tx.CopyFile(toFile, 0666) }) - defer os.Remove(toFile) - - // Open the cloned database. - db2, _ := bolt.Open(toFile, 0666, nil) - defer db2.Close() - - // Ensure that the key exists in the copy. - db2.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - fmt.Printf("The value for 'foo' in the clone is: %s\n", value) - return nil - }) - - // Output: - // The value for 'foo' in the clone is: bar -} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/.gitignore b/Godeps/_workspace/src/github.com/cenkalti/backoff/.gitignore deleted file mode 100644 index 00268614f..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/.travis.yml b/Godeps/_workspace/src/github.com/cenkalti/backoff/.travis.yml deleted file mode 100644 index ce9cb6233..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/.travis.yml +++ /dev/null @@ -1,2 +0,0 @@ -language: go -go: 1.3.3 diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/LICENSE b/Godeps/_workspace/src/github.com/cenkalti/backoff/LICENSE deleted file mode 100644 index 89b817996..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Cenk Altı - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/README.md b/Godeps/_workspace/src/github.com/cenkalti/backoff/README.md deleted file mode 100644 index 020b8fbf3..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/README.md +++ /dev/null @@ -1,116 +0,0 @@ -# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] - -This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. - -[Exponential backoff][exponential backoff wiki] -is an algorithm that uses feedback to multiplicatively decrease the rate of some process, -in order to gradually find an acceptable rate. -The retries exponentially increase and stop increasing when a certain threshold is met. - -## How To - -We define two functions, `Retry()` and `RetryNotify()`. -They receive an `Operation` to execute, a `BackOff` algorithm, -and an optional `Notify` error handler. - -The operation will be executed, and will be retried on failure with delay -as given by the backoff algorithm. The backoff algorithm can also decide when to stop -retrying. -In addition, the notify error handler will be called after each failed attempt, -except for the last time, whose error should be handled by the caller. - -```go -// An Operation is executing by Retry() or RetryNotify(). -// The operation will be retried using a backoff policy if it returns an error. -type Operation func() error - -// Notify is a notify-on-error function. It receives an operation error and -// backoff delay if the operation failed (with an error). -// -// NOTE that if the backoff policy stated to stop retrying, -// the notify function isn't called. -type Notify func(error, time.Duration) - -func Retry(Operation, BackOff) error -func RetryNotify(Operation, BackOff, Notify) -``` - -## Examples - -See more advanced examples in the [godoc][advanced example]. - -### Retry - -Simple retry helper that uses the default exponential backoff algorithm: - -```go -operation := func() error { - // An operation that might fail. - return nil // or return errors.New("some error") -} - -err := Retry(operation, NewExponentialBackOff()) -if err != nil { - // Handle error. - return err -} - -// Operation is successful. -return nil -``` - -### Ticker - -```go -operation := func() error { - // An operation that might fail - return nil // or return errors.New("some error") -} - -b := NewExponentialBackOff() -ticker := NewTicker(b) - -var err error - -// Ticks will continue to arrive when the previous operation is still running, -// so operations that take a while to fail could run in quick succession. -for range ticker.C { - if err = operation(); err != nil { - log.Println(err, "will retry...") - continue - } - - ticker.Stop() - break -} - -if err != nil { - // Operation has failed. - return err -} - -// Operation is successful. -return nil -``` - -## Getting Started - -```bash -# install -$ go get github.com/cenkalti/backoff - -# test -$ cd $GOPATH/src/github.com/cenkalti/backoff -$ go get -t ./... 
-$ go test -v -cover -``` - -[godoc]: https://godoc.org/github.com/cenkalti/backoff -[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png -[travis]: https://travis-ci.org/cenkalti/backoff -[travis image]: https://travis-ci.org/cenkalti/backoff.png - -[google-http-java-client]: https://github.com/google/google-http-java-client -[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff - -[advanced example]: https://godoc.org/github.com/cenkalti/backoff#example_ diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/adv_example_test.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/adv_example_test.go deleted file mode 100644 index 3fe6783b8..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/adv_example_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package backoff - -import ( - "io/ioutil" - "log" - "net/http" - "time" -) - -// This is an example that demonstrates how this package could be used -// to perform various advanced operations. -// -// It executes an HTTP GET request with exponential backoff, -// while errors are logged and failed responses are closed, as required by net/http package. -// -// Note we define a condition function which is used inside the operation to -// determine whether the operation succeeded or failed. -func Example() error { - res, err := GetWithRetry( - "http://localhost:9999", - ErrorIfStatusCodeIsNot(http.StatusOK), - NewExponentialBackOff()) - - if err != nil { - // Close response body of last (failed) attempt. - // The Last attempt isn't handled by the notify-on-error function, - // which closes the body of all the previous attempts. - if e := res.Body.Close(); e != nil { - log.Printf("error closing last attempt's response body: %s", e) - } - log.Printf("too many failed request attempts: %s", err) - return err - } - defer res.Body.Close() // The response's Body must be closed. - - // Read body - _, _ = ioutil.ReadAll(res.Body) - - // Do more stuff - return nil -} - -// GetWithRetry is a helper function that performs an HTTP GET request -// to the given URL, and retries with the given backoff using the given condition function. -// -// It also uses a notify-on-error function which logs -// and closes the response body of the failed request. -func GetWithRetry(url string, condition Condition, bck BackOff) (*http.Response, error) { - var res *http.Response - err := RetryNotify( - func() error { - var err error - res, err = http.Get(url) - if err != nil { - return err - } - return condition(res) - }, - bck, - LogAndClose()) - - return res, err -} - -// Condition is a retry condition function. -// It receives a response, and returns an error -// if the response failed the condition. -type Condition func(*http.Response) error - -// ErrorIfStatusCodeIsNot returns a retry condition function. -// The condition returns an error -// if the given response's status code is not the given HTTP status code. -func ErrorIfStatusCodeIsNot(status int) Condition { - return func(res *http.Response) error { - if res.StatusCode != status { - return NewError(res) - } - return nil - } -} - -// Error is returned on ErrorIfX() condition functions throughout this package. -type Error struct { - Response *http.Response -} - -func NewError(res *http.Response) *Error { - // Sanity check - if res == nil { - panic("response object is nil") - } - return &Error{Response: res} -} -func (err *Error) Error() string { return "request failed" } - -// LogAndClose is a notify-on-error function. 
-// It logs the error and closes the response body. -func LogAndClose() Notify { - return func(err error, wait time.Duration) { - switch e := err.(type) { - case *Error: - defer e.Response.Body.Close() - - b, err := ioutil.ReadAll(e.Response.Body) - var body string - if err != nil { - body = "can't read body" - } else { - body = string(b) - } - - log.Printf("%s: %s", e.Response.Status, body) - default: - log.Println(err) - } - } -} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff.go deleted file mode 100644 index 61bd6df66..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff.go +++ /dev/null @@ -1,59 +0,0 @@ -// Package backoff implements backoff algorithms for retrying operations. -// -// Also has a Retry() helper for retrying operations that may fail. -package backoff - -import "time" - -// BackOff is a backoff policy for retrying an operation. -type BackOff interface { - // NextBackOff returns the duration to wait before retrying the operation, - // or backoff.Stop to indicate that no more retries should be made. - // - // Example usage: - // - // duration := backoff.NextBackOff(); - // if (duration == backoff.Stop) { - // // Do not retry operation. - // } else { - // // Sleep for duration and retry operation. - // } - // - NextBackOff() time.Duration - - // Reset to initial state. - Reset() -} - -// Indicates that no more retries should be made for use in NextBackOff(). -const Stop time.Duration = -1 - -// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, -// meaning that the operation is retried immediately without waiting, indefinitely. -type ZeroBackOff struct{} - -func (b *ZeroBackOff) Reset() {} - -func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } - -// StopBackOff is a fixed backoff policy that always returns backoff.Stop for -// NextBackOff(), meaning that the operation should never be retried. -type StopBackOff struct{} - -func (b *StopBackOff) Reset() {} - -func (b *StopBackOff) NextBackOff() time.Duration { return Stop } - -// ConstantBackOff is a backoff policy that always returns the same backoff delay. -// This is in contrast to an exponential backoff policy, -// which returns a delay that grows longer as you call NextBackOff() over and over again. 
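Because BackOff is just these two methods, the fixed policies above compose easily with custom ones. A sketch of a hypothetical attempt-capping policy written against the same interface (maxTries is not part of this package):

```go
package example

import (
	"time"

	"github.com/cenkalti/backoff"
)

// maxTries stops the underlying policy after n attempts.
type maxTries struct {
	n, used int
	inner   backoff.BackOff
}

func (m *maxTries) Reset() { m.used = 0; m.inner.Reset() }

func (m *maxTries) NextBackOff() time.Duration {
	if m.used >= m.n {
		return backoff.Stop
	}
	m.used++
	return m.inner.NextBackOff()
}
```

It can then be passed anywhere a BackOff is expected, e.g. `backoff.Retry(op, &maxTries{n: 5, inner: backoff.NewExponentialBackOff()})`.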
-type ConstantBackOff struct { - Interval time.Duration -} - -func (b *ConstantBackOff) Reset() {} -func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } - -func NewConstantBackOff(d time.Duration) *ConstantBackOff { - return &ConstantBackOff{Interval: d} -} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff_test.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff_test.go deleted file mode 100644 index 91f27c4f1..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package backoff - -import ( - "testing" - "time" -) - -func TestNextBackOffMillis(t *testing.T) { - subtestNextBackOff(t, 0, new(ZeroBackOff)) - subtestNextBackOff(t, Stop, new(StopBackOff)) -} - -func subtestNextBackOff(t *testing.T, expectedValue time.Duration, backOffPolicy BackOff) { - for i := 0; i < 10; i++ { - next := backOffPolicy.NextBackOff() - if next != expectedValue { - t.Errorf("got: %d expected: %d", next, expectedValue) - } - } -} - -func TestConstantBackOff(t *testing.T) { - backoff := NewConstantBackOff(time.Second) - if backoff.NextBackOff() != time.Second { - t.Error("invalid interval") - } -} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/example_test.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/example_test.go deleted file mode 100644 index 0d1852e45..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/example_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package backoff - -import "log" - -func ExampleRetry() error { - operation := func() error { - // An operation that might fail. - return nil // or return errors.New("some error") - } - - err := Retry(operation, NewExponentialBackOff()) - if err != nil { - // Handle error. - return err - } - - // Operation is successful. - return nil -} - -func ExampleTicker() error { - operation := func() error { - // An operation that might fail - return nil // or return errors.New("some error") - } - - b := NewExponentialBackOff() - ticker := NewTicker(b) - - var err error - - // Ticks will continue to arrive when the previous operation is still running, - // so operations that take a while to fail could run in quick succession. - for _ = range ticker.C { - if err = operation(); err != nil { - log.Println(err, "will retry...") - continue - } - - ticker.Stop() - break - } - - if err != nil { - // Operation has failed. - return err - } - - // Operation is successful. - return nil -} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential.go deleted file mode 100644 index cc2a164f2..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential.go +++ /dev/null @@ -1,151 +0,0 @@ -package backoff - -import ( - "math/rand" - "time" -) - -/* -ExponentialBackOff is a backoff implementation that increases the backoff -period for each retry attempt using a randomization function that grows exponentially. - -NextBackOff() is calculated using the following formula: - - randomized interval = - RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) - -In other words NextBackOff() will range between the randomization factor -percentage below and above the retry interval. 
- -For example, given the following parameters: - - RetryInterval = 2 - RandomizationFactor = 0.5 - Multiplier = 2 - -the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, -multiplied by the exponential, that is, between 2 and 6 seconds. - -Note: MaxInterval caps the RetryInterval and not the randomized interval. - -If the time elapsed since an ExponentialBackOff instance is created goes past the -MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. - -The elapsed time can be reset by calling Reset(). - -Example: Given the following default arguments, for 10 tries the sequence will be, -and assuming we go over the MaxElapsedTime on the 10th try: - - Request # RetryInterval (seconds) Randomized Interval (seconds) - - 1 0.5 [0.25, 0.75] - 2 0.75 [0.375, 1.125] - 3 1.125 [0.562, 1.687] - 4 1.687 [0.8435, 2.53] - 5 2.53 [1.265, 3.795] - 6 3.795 [1.897, 5.692] - 7 5.692 [2.846, 8.538] - 8 8.538 [4.269, 12.807] - 9 12.807 [6.403, 19.210] - 10 19.210 backoff.Stop - -Note: Implementation is not thread-safe. -*/ -type ExponentialBackOff struct { - InitialInterval time.Duration - RandomizationFactor float64 - Multiplier float64 - MaxInterval time.Duration - // After MaxElapsedTime the ExponentialBackOff stops. - // It never stops if MaxElapsedTime == 0. - MaxElapsedTime time.Duration - Clock Clock - - currentInterval time.Duration - startTime time.Time -} - -// Clock is an interface that returns current time for BackOff. -type Clock interface { - Now() time.Time -} - -// Default values for ExponentialBackOff. -const ( - DefaultInitialInterval = 500 * time.Millisecond - DefaultRandomizationFactor = 0.5 - DefaultMultiplier = 1.5 - DefaultMaxInterval = 60 * time.Second - DefaultMaxElapsedTime = 15 * time.Minute -) - -// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. -func NewExponentialBackOff() *ExponentialBackOff { - b := &ExponentialBackOff{ - InitialInterval: DefaultInitialInterval, - RandomizationFactor: DefaultRandomizationFactor, - Multiplier: DefaultMultiplier, - MaxInterval: DefaultMaxInterval, - MaxElapsedTime: DefaultMaxElapsedTime, - Clock: SystemClock, - } - b.Reset() - return b -} - -type systemClock struct{} - -func (t systemClock) Now() time.Time { - return time.Now() -} - -// SystemClock implements Clock interface that uses time.Now(). -var SystemClock = systemClock{} - -// Reset the interval back to the initial retry interval and restarts the timer. -func (b *ExponentialBackOff) Reset() { - b.currentInterval = b.InitialInterval - b.startTime = b.Clock.Now() -} - -// NextBackOff calculates the next backoff interval using the formula: -// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval) -func (b *ExponentialBackOff) NextBackOff() time.Duration { - // Make sure we have not gone over the maximum elapsed time. - if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime { - return Stop - } - defer b.incrementCurrentInterval() - return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) -} - -// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance -// is created and is reset when Reset() is called. -// -// The elapsed time is computed using time.Now().UnixNano(). -func (b *ExponentialBackOff) GetElapsedTime() time.Duration { - return b.Clock.Now().Sub(b.startTime) -} - -// Increments the current interval by multiplying it with the multiplier. 
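To make the table above concrete: with the defaults, NextBackOff() draws uniformly from [0.5·I, 1.5·I] around the current interval I and then multiplies I by 1.5, so the first draw lands in roughly [250ms, 750ms], the second in [375ms, 1.125s], and so on. A tiny sketch that prints the first few draws:

```go
package main

import (
	"fmt"

	"github.com/cenkalti/backoff"
)

func main() {
	b := backoff.NewExponentialBackOff()
	// With the default InitialInterval (500ms), RandomizationFactor (0.5)
	// and Multiplier (1.5), expect roughly:
	// [250ms, 750ms], [375ms, 1.125s], [562ms, 1.687s], ...
	for i := 0; i < 5; i++ {
		fmt.Println(b.NextBackOff())
	}
}
```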
-func (b *ExponentialBackOff) incrementCurrentInterval() {
-	// Check for overflow; if overflow is detected, set the current interval to the max interval.
-	if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
-		b.currentInterval = b.MaxInterval
-	} else {
-		b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
-	}
-}
-
-// Returns a random value from the following interval:
-// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
-func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
-	var delta = randomizationFactor * float64(currentInterval)
-	var minInterval = float64(currentInterval) - delta
-	var maxInterval = float64(currentInterval) + delta
-
-	// Get a random value from the range [minInterval, maxInterval].
-	// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
-	// we want a 33% chance for selecting either 1, 2 or 3.
-	return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
-}
diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential_test.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential_test.go
deleted file mode 100644
index 11b95e4f6..000000000
--- a/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential_test.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package backoff
-
-import (
-	"math"
-	"testing"
-	"time"
-)
-
-func TestBackOff(t *testing.T) {
-	var (
-		testInitialInterval     = 500 * time.Millisecond
-		testRandomizationFactor = 0.1
-		testMultiplier          = 2.0
-		testMaxInterval         = 5 * time.Second
-		testMaxElapsedTime      = 15 * time.Minute
-	)
-
-	exp := NewExponentialBackOff()
-	exp.InitialInterval = testInitialInterval
-	exp.RandomizationFactor = testRandomizationFactor
-	exp.Multiplier = testMultiplier
-	exp.MaxInterval = testMaxInterval
-	exp.MaxElapsedTime = testMaxElapsedTime
-	exp.Reset()
-
-	var expectedResults = []time.Duration{500, 1000, 2000, 4000, 5000, 5000, 5000, 5000, 5000, 5000}
-	for i, d := range expectedResults {
-		expectedResults[i] = d * time.Millisecond
-	}
-
-	for _, expected := range expectedResults {
-		assertEquals(t, expected, exp.currentInterval)
-		// Assert that the next backoff falls in the expected range.
-		var minInterval = expected - time.Duration(testRandomizationFactor*float64(expected))
-		var maxInterval = expected + time.Duration(testRandomizationFactor*float64(expected))
-		var actualInterval = exp.NextBackOff()
-		if !(minInterval <= actualInterval && actualInterval <= maxInterval) {
-			t.Error("error")
-		}
-	}
-}
-
-func TestGetRandomizedInterval(t *testing.T) {
-	// 33% chance of being 1.
-	assertEquals(t, 1, getRandomValueFromInterval(0.5, 0, 2))
-	assertEquals(t, 1, getRandomValueFromInterval(0.5, 0.33, 2))
-	// 33% chance of being 2.
-	assertEquals(t, 2, getRandomValueFromInterval(0.5, 0.34, 2))
-	assertEquals(t, 2, getRandomValueFromInterval(0.5, 0.66, 2))
-	// 33% chance of being 3.
-	assertEquals(t, 3, getRandomValueFromInterval(0.5, 0.67, 2))
-	assertEquals(t, 3, getRandomValueFromInterval(0.5, 0.99, 2))
-}
-
-type TestClock struct {
-	i     time.Duration
-	start time.Time
-}
-
-func (c *TestClock) Now() time.Time {
-	t := c.start.Add(c.i)
-	c.i += time.Second
-	return t
-}
-
-func TestGetElapsedTime(t *testing.T) {
-	var exp = NewExponentialBackOff()
-	exp.Clock = &TestClock{}
-	exp.Reset()
-
-	var elapsedTime = exp.GetElapsedTime()
-	if elapsedTime != time.Second {
-		t.Errorf("elapsedTime=%d", elapsedTime)
-	}
-}
-
-func TestMaxElapsedTime(t *testing.T) {
-	var exp = NewExponentialBackOff()
-	exp.Clock = &TestClock{start: time.Time{}.Add(10000 * time.Second)}
-	// Reset the start time to zero, ensuring that the elapsed time will be greater
-	// than the max elapsed time.
-	exp.startTime = time.Time{}
-	assertEquals(t, Stop, exp.NextBackOff())
-}
-
-func TestBackOffOverflow(t *testing.T) {
-	var (
-		testInitialInterval time.Duration = math.MaxInt64 / 2
-		testMaxInterval     time.Duration = math.MaxInt64
-		testMultiplier                    = 2.1
-	)
-
-	exp := NewExponentialBackOff()
-	exp.InitialInterval = testInitialInterval
-	exp.Multiplier = testMultiplier
-	exp.MaxInterval = testMaxInterval
-	exp.Reset()
-
-	exp.NextBackOff()
-	// Assert that when an overflow is possible, the current interval is set to the max interval.
-	assertEquals(t, testMaxInterval, exp.currentInterval)
-}
-
-func assertEquals(t *testing.T, expected, value time.Duration) {
-	if expected != value {
-		t.Errorf("got: %d, expected: %d", value, expected)
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/retry.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/retry.go
deleted file mode 100644
index f01f2bbd0..000000000
--- a/Godeps/_workspace/src/github.com/cenkalti/backoff/retry.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package backoff
-
-import "time"
-
-// An Operation is executed by Retry() or RetryNotify().
-// The operation will be retried using a backoff policy if it returns an error.
-type Operation func() error
-
-// Notify is a notify-on-error function. It receives an operation error and
-// backoff delay if the operation failed (with an error).
-//
-// NOTE that if the backoff policy says to stop retrying,
-// the notify function isn't called.
-type Notify func(error, time.Duration)
-
-// Retry the function f until it does not return error or BackOff stops.
-// f is guaranteed to be run at least once.
-// It is the caller's responsibility to reset b after Retry returns.
-//
-// Retry sleeps the goroutine for the duration returned by BackOff after a
-// failed operation returns.
-func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) }
-
-// RetryNotify calls the notify function with the error and wait duration
-// for each failed attempt before sleep.
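A short usage sketch for RetryNotify as documented above: an operation that fails twice, paired with a Notify callback that logs the upcoming delay (all names illustrative):

```go
package main

import (
	"errors"
	"log"
	"time"

	"github.com/cenkalti/backoff"
)

func main() {
	attempts := 0
	op := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	}
	notify := func(err error, wait time.Duration) {
		log.Printf("attempt %d failed (%v); retrying in %v", attempts, err, wait)
	}
	if err := backoff.RetryNotify(op, backoff.NewExponentialBackOff(), notify); err != nil {
		log.Fatalf("gave up: %v", err)
	}
	log.Printf("succeeded after %d attempts", attempts)
}
```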
-func RetryNotify(operation Operation, b BackOff, notify Notify) error { - var err error - var next time.Duration - - b.Reset() - for { - if err = operation(); err == nil { - return nil - } - - if next = b.NextBackOff(); next == Stop { - return err - } - - if notify != nil { - notify(err, next) - } - - time.Sleep(next) - } -} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/retry_test.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/retry_test.go deleted file mode 100644 index c0d25ab76..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/retry_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package backoff - -import ( - "errors" - "log" - "testing" -) - -func TestRetry(t *testing.T) { - const successOn = 3 - var i = 0 - - // This function succeeds on the "successOn"th call. - f := func() error { - i++ - log.Printf("function is called %d. time\n", i) - - if i == successOn { - log.Println("OK") - return nil - } - - log.Println("error") - return errors.New("error") - } - - err := Retry(f, NewExponentialBackOff()) - if err != nil { - t.Errorf("unexpected error: %s", err.Error()) - } - if i != successOn { - t.Errorf("invalid number of retries: %d", i) - } -} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker.go deleted file mode 100644 index 7a5ff4ed1..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker.go +++ /dev/null @@ -1,79 +0,0 @@ -package backoff - -import ( - "runtime" - "sync" - "time" -) - -// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. -// -// Ticks will continue to arrive when the previous operation is still running, -// so operations that take a while to fail could run in quick succession. -type Ticker struct { - C <-chan time.Time - c chan time.Time - b BackOff - stop chan struct{} - stopOnce sync.Once -} - -// NewTicker returns a new Ticker containing a channel that will send the time at times -// specified by the BackOff argument. Ticker is guaranteed to tick at least once. -// The channel is closed when the Stop method is called or BackOff stops. -func NewTicker(b BackOff) *Ticker { - c := make(chan time.Time) - t := &Ticker{ - C: c, - c: c, - b: b, - stop: make(chan struct{}), - } - go t.run() - runtime.SetFinalizer(t, (*Ticker).Stop) - return t -} - -// Stop turns off a ticker. After Stop, no more ticks will be sent. -func (t *Ticker) Stop() { - t.stopOnce.Do(func() { close(t.stop) }) -} - -func (t *Ticker) run() { - c := t.c - defer close(c) - t.b.Reset() - - // Ticker is guaranteed to tick at least once. - afterC := t.send(time.Now()) - - for { - if afterC == nil { - return - } - - select { - case tick := <-afterC: - afterC = t.send(tick) - case <-t.stop: - t.c = nil // Prevent future ticks from being sent to the channel.
- return - } - } -} - -func (t *Ticker) send(tick time.Time) <-chan time.Time { - select { - case t.c <- tick: - case <-t.stop: - return nil - } - - next := t.b.NextBackOff() - if next == Stop { - t.Stop() - return nil - } - - return time.After(next) -} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker_test.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker_test.go deleted file mode 100644 index 7c392df46..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package backoff - -import ( - "errors" - "log" - "testing" -) - -func TestTicker(t *testing.T) { - const successOn = 3 - var i = 0 - - // This function succeeds on the "successOn"th call. - f := func() error { - i++ - log.Printf("function is called %d. time\n", i) - - if i == successOn { - log.Println("OK") - return nil - } - - log.Println("error") - return errors.New("error") - } - - b := NewExponentialBackOff() - ticker := NewTicker(b) - - var err error - for range ticker.C { - if err = f(); err != nil { - t.Log(err) - continue - } - - break - } - if err != nil { - t.Errorf("unexpected error: %s", err.Error()) - } - if i != successOn { - t.Errorf("invalid number of retries: %d", i) - } -} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/cache.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/cache.go deleted file mode 100644 index feb28f2a5..000000000 --- a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/cache.go +++ /dev/null @@ -1,258 +0,0 @@ -// This code is based on encoding/json and gorilla/schema - -package encoding - -import ( - "reflect" - "sort" - "sync" -) - -// A field represents a single field found in a struct. -type field struct { - name string - nameBytes []byte // []byte(name) - equalFold func(s, t []byte) bool - - tag bool - index []int - typ reflect.Type - omitEmpty bool - quoted bool -} - -func fillField(f field) field { - f.nameBytes = []byte(f.name) - f.equalFold = foldFunc(f.nameBytes) - - return f -} - -// byName sorts fields by name, breaking ties with depth, -// then breaking ties with "name came from tag", then -// breaking ties with index sequence. -type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts fields by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that should be recognized for the given type. -// The algorithm is breadth-first search over the set of structs to include - the top struct -// and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next.
- count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. - for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" { // unexported - continue - } - tag := getTag(sf) - if tag == "-" { - continue - } - name, opts := parseTag(tag) - if !isValidTag(name) { - name = "" - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. - if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" - if name == "" { - name = sf.Name - } - fields = append(fields, fillField(field{ - name: name, - tag: tagged, - index: index, - typ: ft, - omitEmpty: opts.Contains("omitempty"), - })) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with valid tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// valid tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. 
- for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. -func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. - f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/decoder.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/decoder.go deleted file mode 100644 index f50478abb..000000000 --- a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/decoder.go +++ /dev/null @@ -1,141 +0,0 @@ -package encoding - -import ( - "errors" - "reflect" - "runtime" - "sync" -) - -var byteSliceType = reflect.TypeOf([]byte(nil)) - -type decoderFunc func(dv reflect.Value, sv reflect.Value) - -// Decode decodes map[string]interface{} into a struct. The first parameter -// must be a pointer. -func Decode(dst interface{}, src interface{}) (err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - if v, ok := r.(string); ok { - err = errors.New(v) - } else { - err = r.(error) - } - } - }() - - dv := reflect.ValueOf(dst) - sv := reflect.ValueOf(src) - if dv.Kind() != reflect.Ptr { - return &DecodeTypeError{ - DestType: dv.Type(), - SrcType: sv.Type(), - Reason: "must be a pointer", - } - } - - dv = dv.Elem() - if !dv.CanAddr() { - return &DecodeTypeError{ - DestType: dv.Type(), - SrcType: sv.Type(), - Reason: "must be addressable", - } - } - - decode(dv, sv) - return nil -} - -// decode decodes the source value into the destination value -func decode(dv, sv reflect.Value) { - valueDecoder(dv, sv)(dv, sv) -} - -type decoderCacheKey struct { - dt, st reflect.Type -} - -var decoderCache struct { - sync.RWMutex - m map[decoderCacheKey]decoderFunc -} - -func valueDecoder(dv, sv reflect.Value) decoderFunc { - if !sv.IsValid() { - return invalidValueDecoder - } - - if dv.IsValid() { - dv = indirect(dv, false) - dv.Set(reflect.Zero(dv.Type())) - } - - return typeDecoder(dv.Type(), sv.Type()) -} - -func typeDecoder(dt, st reflect.Type) decoderFunc { - decoderCache.RLock() - f := decoderCache.m[decoderCacheKey{dt, st}] - decoderCache.RUnlock() - if f != nil { - return f - } - - // To deal with recursive types, populate the map with an - // indirect func before we build it. This type waits on the - // real func (f) to be ready and then calls it. This indirect - // func is only used for recursive types. 
- decoderCache.Lock() - var wg sync.WaitGroup - wg.Add(1) - decoderCache.m[decoderCacheKey{dt, st}] = func(dv, sv reflect.Value) { - wg.Wait() - f(dv, sv) - } - decoderCache.Unlock() - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. - f = newTypeDecoder(dt, st) - wg.Done() - decoderCache.Lock() - decoderCache.m[decoderCacheKey{dt, st}] = f - decoderCache.Unlock() - return f -} - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -func indirect(v reflect.Value, decodeNull bool) reflect.Value { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodeNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - return v -} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/decoder_test.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/decoder_test.go deleted file mode 100644 index 909b2cc2d..000000000 --- a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/decoder_test.go +++ /dev/null @@ -1,426 +0,0 @@ -package encoding - -import ( - "bytes" - "encoding/json" - "image" - "reflect" - "testing" -) - -type T struct { - X string - Y int - Z int `gorethink:"-"` -} - -type U struct { - Alphabet string `gorethink:"alpha"` -} - -type V struct { - F1 interface{} - F2 int32 - F3 string -} - -type tx struct { - x int -} - -var txType = reflect.TypeOf((*tx)(nil)).Elem() - -// Test data structures for anonymous fields. - -type Point struct { - Z int -} - -type Top struct { - Level0 int - Embed0 - *Embed0a - *Embed0b `gorethink:"e,omitempty"` // treated as named - Embed0c `gorethink:"-"` // ignored - Loop - Embed0p // has Point with X, Y, used - Embed0q // has Point with Z, used -} - -type Embed0 struct { - Level1a int // overridden by Embed0a's Level1a with tag - Level1b int // used because Embed0a's Level1b is renamed - Level1c int // used because Embed0a's Level1c is ignored - Level1d int // annihilated by Embed0a's Level1d - Level1e int `gorethink:"x"` // annihilated by Embed0a.Level1e -} - -type Embed0a struct { - Level1a int `gorethink:"Level1a,omitempty"` - Level1b int `gorethink:"LEVEL1B,omitempty"` - Level1c int `gorethink:"-"` - Level1d int // annihilated by Embed0's Level1d - Level1f int `gorethink:"x"` // annihilated by Embed0's Level1e -} - -type Embed0b Embed0 - -type Embed0c Embed0 - -type Embed0p struct { - image.Point -} - -type Embed0q struct { - Point -} - -type Loop struct { - Loop1 int `gorethink:",omitempty"` - Loop2 int `gorethink:",omitempty"` - *Loop -} - -// From reflect test: -// The X in S6 and S7 annihilate, but they also block the X in S8.S9. -type S5 struct { - S6 - S7 - S8 -} - -type S6 struct { - X int -} - -type S7 S6 - -type S8 struct { - S9 -} - -type S9 struct { - X int - Y int -} - -// From reflect test: -// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9. 
-type S10 struct { - S11 - S12 - S13 -} - -type S11 struct { - S6 -} - -type S12 struct { - S6 -} - -type S13 struct { - S8 -} - -type Pointer struct { - PPoint *Point - Point Point -} - -type decodeTest struct { - in interface{} - ptr interface{} - out interface{} - err error -} - -type Ambig struct { - // Given "hello", the first match should win. - First int `gorethink:"HELLO"` - Second int `gorethink:"Hello"` -} - -var decodeTests = []decodeTest{ - // basic types - {in: true, ptr: new(bool), out: true}, - {in: 1, ptr: new(int), out: 1}, - {in: 1.2, ptr: new(float64), out: 1.2}, - {in: -5, ptr: new(int16), out: int16(-5)}, - {in: 2, ptr: new(string), out: string("2")}, - {in: float64(2.0), ptr: new(interface{}), out: float64(2.0)}, - {in: string("2"), ptr: new(interface{}), out: string("2")}, - {in: "a\u1234", ptr: new(string), out: "a\u1234"}, - {in: []interface{}{}, ptr: new([]string), out: []string{}}, - {in: map[string]interface{}{"X": []interface{}{1, 2, 3}, "Y": 4}, ptr: new(T), out: T{}, err: &DecodeTypeError{reflect.TypeOf(""), reflect.TypeOf([]interface{}{}), ""}}, - {in: map[string]interface{}{"x": 1}, ptr: new(tx), out: tx{}}, - {in: map[string]interface{}{"F1": float64(1), "F2": 2, "F3": 3}, ptr: new(V), out: V{F1: float64(1), F2: int32(2), F3: string("3")}}, - {in: map[string]interface{}{"F1": string("1"), "F2": 2, "F3": 3}, ptr: new(V), out: V{F1: string("1"), F2: int32(2), F3: string("3")}}, - { - in: map[string]interface{}{"k1": int64(1), "k2": "s", "k3": []interface{}{int64(1), 2.0, 3e-3}, "k4": map[string]interface{}{"kk1": "s", "kk2": int64(2)}}, - out: map[string]interface{}{"k1": int64(1), "k2": "s", "k3": []interface{}{int64(1), 2.0, 3e-3}, "k4": map[string]interface{}{"kk1": "s", "kk2": int64(2)}}, - ptr: new(interface{}), - }, - - // Z has a "-" tag. 
- {in: map[string]interface{}{"Y": 1, "Z": 2}, ptr: new(T), out: T{Y: 1}}, - - {in: map[string]interface{}{"alpha": "abc", "alphabet": "xyz"}, ptr: new(U), out: U{Alphabet: "abc"}}, - {in: map[string]interface{}{"alpha": "abc"}, ptr: new(U), out: U{Alphabet: "abc"}}, - {in: map[string]interface{}{"alphabet": "xyz"}, ptr: new(U), out: U{}}, - - // array tests - {in: []interface{}{1, 2, 3}, ptr: new([3]int), out: [3]int{1, 2, 3}}, - {in: []interface{}{1, 2, 3}, ptr: new([1]int), out: [1]int{1}}, - {in: []interface{}{1, 2, 3}, ptr: new([5]int), out: [5]int{1, 2, 3, 0, 0}}, - - // empty array to interface test - {in: map[string]interface{}{"T": []interface{}{}}, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": []interface{}{}}}, - - { - in: map[string]interface{}{ - "Level0": 1, - "Level1b": 2, - "Level1c": 3, - "level1d": 4, - "Level1a": 5, - "LEVEL1B": 6, - "e": map[string]interface{}{ - "Level1a": 8, - "Level1b": 9, - "Level1c": 10, - "Level1d": 11, - "x": 12, - }, - "Loop1": 13, - "Loop2": 14, - "X": 15, - "Y": 16, - "Z": 17, - }, - ptr: new(Top), - out: Top{ - Level0: 1, - Embed0: Embed0{ - Level1b: 2, - Level1c: 3, - }, - Embed0a: &Embed0a{ - Level1a: 5, - Level1b: 6, - }, - Embed0b: &Embed0b{ - Level1a: 8, - Level1b: 9, - Level1c: 10, - Level1d: 11, - }, - Loop: Loop{ - Loop1: 13, - Loop2: 14, - }, - Embed0p: Embed0p{ - Point: image.Point{X: 15, Y: 16}, - }, - Embed0q: Embed0q{ - Point: Point{Z: 17}, - }, - }, - }, - { - in: map[string]interface{}{"hello": 1}, - ptr: new(Ambig), - out: Ambig{First: 1}, - }, - { - in: map[string]interface{}{"X": 1, "Y": 2}, - ptr: new(S5), - out: S5{S8: S8{S9: S9{Y: 2}}}, - }, - { - in: map[string]interface{}{"X": 1, "Y": 2}, - ptr: new(S10), - out: S10{S13: S13{S8: S8{S9: S9{Y: 2}}}}, - }, - { - in: map[string]interface{}{"PPoint": map[string]interface{}{"Z": 1}, "Point": map[string]interface{}{"Z": 2}}, - ptr: new(Pointer), - out: Pointer{PPoint: &Point{Z: 1}, Point: Point{Z: 2}}, - }, - { - in: map[string]interface{}{"Point": map[string]interface{}{"Z": 2}}, - ptr: new(Pointer), - out: Pointer{PPoint: nil, Point: Point{Z: 2}}, - }, -} - -func TestDecode(t *testing.T) { - for i, tt := range decodeTests { - if tt.ptr == nil { - continue - } - - // v = new(right-type) - v := reflect.New(reflect.TypeOf(tt.ptr).Elem()) - - err := Decode(v.Interface(), tt.in) - if !jsonEqual(err, tt.err) { - t.Errorf("#%d: got error %v want %v", i, err, tt.err) - continue - } - - if tt.err == nil && !jsonEqual(v.Elem().Interface(), tt.out) { - t.Errorf("#%d: mismatch\nhave: %+v\nwant: %+v", i, v.Elem().Interface(), tt.out) - continue - } - - // Check round trip. 
- if tt.err == nil { - enc, err := Encode(v.Interface()) - if err != nil { - t.Errorf("#%d: error re-marshaling: %v", i, err) - continue - } - vv := reflect.New(reflect.TypeOf(tt.ptr).Elem()) - - if err := Decode(vv.Interface(), enc); err != nil { - t.Errorf("#%d: error re-decoding: %v", i, err) - continue - } - if !jsonEqual(v.Elem().Interface(), vv.Elem().Interface()) { - t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), vv.Elem().Interface()) - continue - } - } - } -} - -func TestStringKind(t *testing.T) { - type aMap map[string]int - - var m1, m2 map[string]int - m1 = map[string]int{ - "foo": 42, - } - - data, err := Encode(m1) - if err != nil { - t.Errorf("Unexpected error encoding: %v", err) - } - - err = Decode(&m2, data) - if err != nil { - t.Errorf("Unexpected error decoding: %v", err) - } - - if !jsonEqual(m1, m2) { - t.Error("Items should be equal after encoding and then decoding") - } - -} - -// Test handling of unexported fields that should be ignored. -type unexportedFields struct { - Name string - m map[string]interface{} `gorethink:"-"` - m2 map[string]interface{} `gorethink:"abcd"` -} - -func TestDecodeUnexported(t *testing.T) { - input := map[string]interface{}{ - "Name": "Bob", - "m": map[string]interface{}{ - "x": 123, - }, - "m2": map[string]interface{}{ - "y": 123, - }, - "abcd": map[string]interface{}{ - "z": 789, - }, - } - want := &unexportedFields{Name: "Bob"} - - out := &unexportedFields{} - err := Decode(out, input) - if err != nil { - t.Errorf("got error %v, expected nil", err) - } - if !jsonEqual(out, want) { - t.Errorf("got %q, want %q", out, want) - } -} - -type Foo struct { - FooBar interface{} `gorethink:"foobar"` -} -type Bar struct { - Baz int `gorethink:"baz"` -} - -type UnmarshalerPointer struct { - Value *UnmarshalerValue -} - -type UnmarshalerValue struct { - ValueInt int64 - ValueString string -} - -func (v *UnmarshalerValue) MarshalRQL() (interface{}, error) { - if v.ValueInt != int64(0) { - return Encode(v.ValueInt) - } - if v.ValueString != "" { - return Encode(v.ValueString) - } - - return Encode(nil) -} - -func (v *UnmarshalerValue) UnmarshalRQL(b interface{}) (err error) { - n, s := int64(0), "" - - if err = Decode(&s, b); err == nil { - v.ValueString = s - return - } - if err = Decode(&n, b); err == nil { - v.ValueInt = n - - } - - return -} - -func TestDecodeUnmarshalerPointer(t *testing.T) { - input := map[string]interface{}{ - "Value": "abc", - } - want := &UnmarshalerPointer{ - Value: &UnmarshalerValue{ValueString: "abc"}, - } - - out := &UnmarshalerPointer{} - err := Decode(out, input) - if err != nil { - t.Errorf("got error %v, expected nil", err) - } - if !jsonEqual(out, want) { - t.Errorf("got %q, want %q", out, want) - } -} - -func jsonEqual(a, b interface{}) bool { - ba, err := json.Marshal(a) - if err != nil { - panic(err) - } - bb, err := json.Marshal(b) - if err != nil { - panic(err) - } - - return bytes.Compare(ba, bb) == 0 -} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/decoder_types.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/decoder_types.go deleted file mode 100644 index 61d268f83..000000000 --- a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/decoder_types.go +++ /dev/null @@ -1,524 +0,0 @@ -package encoding - -import ( - "bytes" - "fmt" - "reflect" - "strconv" -) - -// newTypeDecoder constructs a decoderFunc for a type.
-func newTypeDecoder(dt, st reflect.Type) decoderFunc { - if reflect.PtrTo(dt).Implements(unmarshalerType) || - dt.Implements(unmarshalerType) { - return unmarshalerDecoder - } - - if st.Kind() == reflect.Interface { - return interfaceAsTypeDecoder - } - - switch dt.Kind() { - case reflect.Bool: - switch st.Kind() { - case reflect.Bool: - return boolAsBoolDecoder - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return intAsBoolDecoder - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return uintAsBoolDecoder - case reflect.Float32, reflect.Float64: - return floatAsBoolDecoder - case reflect.String: - return stringAsBoolDecoder - default: - return decodeTypeError - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - switch st.Kind() { - case reflect.Bool: - return boolAsIntDecoder - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return intAsIntDecoder - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return uintAsIntDecoder - case reflect.Float32, reflect.Float64: - return floatAsIntDecoder - case reflect.String: - return stringAsIntDecoder - default: - return decodeTypeError - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - switch st.Kind() { - case reflect.Bool: - return boolAsUintDecoder - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return intAsUintDecoder - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return uintAsUintDecoder - case reflect.Float32, reflect.Float64: - return floatAsUintDecoder - case reflect.String: - return stringAsUintDecoder - default: - return decodeTypeError - } - case reflect.Float32, reflect.Float64: - switch st.Kind() { - case reflect.Bool: - return boolAsFloatDecoder - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return intAsFloatDecoder - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return uintAsFloatDecoder - case reflect.Float32, reflect.Float64: - return floatAsFloatDecoder - case reflect.String: - return stringAsFloatDecoder - default: - return decodeTypeError - } - case reflect.String: - switch st.Kind() { - case reflect.Bool: - return boolAsStringDecoder - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return intAsStringDecoder - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return uintAsStringDecoder - case reflect.Float32, reflect.Float64: - return floatAsStringDecoder - case reflect.String: - return stringAsStringDecoder - default: - return decodeTypeError - } - case reflect.Interface: - if !st.AssignableTo(dt) { - return decodeTypeError - } - - return interfaceDecoder - case reflect.Ptr: - return newPtrDecoder(dt, st) - case reflect.Map: - if st.AssignableTo(dt) { - return interfaceDecoder - } - - switch st.Kind() { - case reflect.Map: - return newMapAsMapDecoder(dt, st) - default: - return decodeTypeError - } - case reflect.Struct: - if st.AssignableTo(dt) { - return interfaceDecoder - } - - switch st.Kind() { - case reflect.Map: - if kind := st.Key().Kind(); kind != reflect.String && kind != reflect.Interface { - return newDecodeTypeError(fmt.Errorf("map needs string keys")) - } - - return newMapAsStructDecoder(dt, st) - default: - 
return decodeTypeError - } - case reflect.Slice: - if st.AssignableTo(dt) { - return interfaceDecoder - } - - switch st.Kind() { - case reflect.Array, reflect.Slice: - return newSliceDecoder(dt, st) - default: - return decodeTypeError - } - case reflect.Array: - if st.AssignableTo(dt) { - return interfaceDecoder - } - - switch st.Kind() { - case reflect.Array, reflect.Slice: - return newArrayDecoder(dt, st) - default: - return decodeTypeError - } - default: - return unsupportedTypeDecoder - } -} - -func invalidValueDecoder(dv, sv reflect.Value) { - dv.Set(reflect.Zero(dv.Type())) -} - -func unsupportedTypeDecoder(dv, sv reflect.Value) { - panic(&UnsupportedTypeError{dv.Type()}) -} - -func decodeTypeError(dv, sv reflect.Value) { - panic(&DecodeTypeError{ - DestType: dv.Type(), - SrcType: sv.Type(), - }) -} - -func newDecodeTypeError(err error) decoderFunc { - return func(dv, sv reflect.Value) { - panic(&DecodeTypeError{ - DestType: dv.Type(), - SrcType: sv.Type(), - Reason: err.Error(), - }) - } -} - -func interfaceDecoder(dv, sv reflect.Value) { - dv.Set(sv) -} - -func interfaceAsTypeDecoder(dv, sv reflect.Value) { - decode(dv, sv.Elem()) -} - -type ptrDecoder struct { - elemDec decoderFunc -} - -func (d *ptrDecoder) decode(dv, sv reflect.Value) { - v := reflect.New(dv.Type().Elem()) - d.elemDec(v, sv) - dv.Set(v) -} - -func newPtrDecoder(dt, st reflect.Type) decoderFunc { - dec := &ptrDecoder{typeDecoder(dt.Elem(), st)} - - return dec.decode -} - -func unmarshalerDecoder(dv, sv reflect.Value) { - // modeled off of https://golang.org/src/encoding/json/decode.go?#L325 - if dv.Kind() != reflect.Ptr && dv.Type().Name() != "" && dv.CanAddr() { - dv = dv.Addr() - } - - if dv.IsNil() { - dv.Set(reflect.New(dv.Type().Elem())) - } - - u := dv.Interface().(Unmarshaler) - err := u.UnmarshalRQL(sv.Interface()) - if err != nil { - panic(&DecodeTypeError{dv.Type(), sv.Type(), err.Error()}) - } -} - -// Boolean decoders - -func boolAsBoolDecoder(dv, sv reflect.Value) { - dv.SetBool(sv.Bool()) -} -func boolAsIntDecoder(dv, sv reflect.Value) { - if sv.Bool() { - dv.SetInt(1) - } else { - dv.SetInt(0) - } -} -func boolAsUintDecoder(dv, sv reflect.Value) { - if sv.Bool() { - dv.SetUint(1) - } else { - dv.SetUint(0) - } -} -func boolAsFloatDecoder(dv, sv reflect.Value) { - if sv.Bool() { - dv.SetFloat(1) - } else { - dv.SetFloat(0) - } -} -func boolAsStringDecoder(dv, sv reflect.Value) { - if sv.Bool() { - dv.SetString("1") - } else { - dv.SetString("0") - } -} - -// Int decoders - -func intAsBoolDecoder(dv, sv reflect.Value) { - dv.SetBool(sv.Int() != 0) -} -func intAsIntDecoder(dv, sv reflect.Value) { - dv.SetInt(sv.Int()) -} -func intAsUintDecoder(dv, sv reflect.Value) { - dv.SetUint(uint64(sv.Int())) -} -func intAsFloatDecoder(dv, sv reflect.Value) { - dv.SetFloat(float64(sv.Int())) -} -func intAsStringDecoder(dv, sv reflect.Value) { - dv.SetString(strconv.FormatInt(sv.Int(), 10)) -} - -// Uint decoders - -func uintAsBoolDecoder(dv, sv reflect.Value) { - dv.SetBool(sv.Uint() != 0) -} -func uintAsIntDecoder(dv, sv reflect.Value) { - dv.SetInt(int64(sv.Uint())) -} -func uintAsUintDecoder(dv, sv reflect.Value) { - dv.SetUint(sv.Uint()) -} -func uintAsFloatDecoder(dv, sv reflect.Value) { - dv.SetFloat(float64(sv.Uint())) -} -func uintAsStringDecoder(dv, sv reflect.Value) { - dv.SetString(strconv.FormatUint(sv.Uint(), 10)) -} - -// Float decoders - -func floatAsBoolDecoder(dv, sv reflect.Value) { - dv.SetBool(sv.Float() != 0) -} -func floatAsIntDecoder(dv, sv reflect.Value) { - dv.SetInt(int64(sv.Float())) -} 
-func floatAsUintDecoder(dv, sv reflect.Value) { - dv.SetUint(uint64(sv.Float())) -} -func floatAsFloatDecoder(dv, sv reflect.Value) { - dv.SetFloat(float64(sv.Float())) -} -func floatAsStringDecoder(dv, sv reflect.Value) { - dv.SetString(strconv.FormatFloat(sv.Float(), 'f', -1, 64)) -} - -// String decoders - -func stringAsBoolDecoder(dv, sv reflect.Value) { - b, err := strconv.ParseBool(sv.String()) - if err == nil { - dv.SetBool(b) - } else if sv.String() == "" { - dv.SetBool(false) - } else { - panic(&DecodeTypeError{dv.Type(), sv.Type(), err.Error()}) - } -} -func stringAsIntDecoder(dv, sv reflect.Value) { - i, err := strconv.ParseInt(sv.String(), 0, dv.Type().Bits()) - if err == nil { - dv.SetInt(i) - } else { - panic(&DecodeTypeError{dv.Type(), sv.Type(), err.Error()}) - } -} -func stringAsUintDecoder(dv, sv reflect.Value) { - i, err := strconv.ParseUint(sv.String(), 0, dv.Type().Bits()) - if err == nil { - dv.SetUint(i) - } else { - panic(&DecodeTypeError{dv.Type(), sv.Type(), err.Error()}) - } -} -func stringAsFloatDecoder(dv, sv reflect.Value) { - f, err := strconv.ParseFloat(sv.String(), dv.Type().Bits()) - if err == nil { - dv.SetFloat(f) - } else { - panic(&DecodeTypeError{dv.Type(), sv.Type(), err.Error()}) - } -} -func stringAsStringDecoder(dv, sv reflect.Value) { - dv.SetString(sv.String()) -} - -// Slice/Array decoder - -type sliceDecoder struct { - arrayDec decoderFunc -} - -func (d *sliceDecoder) decode(dv, sv reflect.Value) { - if dv.Kind() == reflect.Slice { - dv.Set(reflect.MakeSlice(dv.Type(), dv.Len(), dv.Cap())) - } - - if !sv.IsNil() { - d.arrayDec(dv, sv) - } -} - -func newSliceDecoder(dt, st reflect.Type) decoderFunc { - // Byte slices get special treatment; arrays don't. - // if t.Elem().Kind() == reflect.Uint8 { - // return decodeByteSlice - // } - dec := &sliceDecoder{newArrayDecoder(dt, st)} - return dec.decode -} - -type arrayDecoder struct { - elemDec decoderFunc -} - -func (d *arrayDecoder) decode(dv, sv reflect.Value) { - // Iterate through the slice/array and decode each element before adding it - // to the dest slice/array - i := 0 - for i < sv.Len() { - if dv.Kind() == reflect.Slice { - // Get element of array, growing if necessary. - if i >= dv.Cap() { - newcap := dv.Cap() + dv.Cap()/2 - if newcap < 4 { - newcap = 4 - } - newdv := reflect.MakeSlice(dv.Type(), dv.Len(), newcap) - reflect.Copy(newdv, dv) - dv.Set(newdv) - } - if i >= dv.Len() { - dv.SetLen(i + 1) - } - } - - if i < dv.Len() { - // Decode into element. - d.elemDec(dv.Index(i), sv.Index(i)) - } - - i++ - } - - // Ensure that the destination is the correct size - if i < dv.Len() { - if dv.Kind() == reflect.Array { - // Array. Zero the rest. 
- z := reflect.Zero(dv.Type().Elem()) - for ; i < dv.Len(); i++ { - dv.Index(i).Set(z) - } - } else { - dv.SetLen(i) - } - } -} - -func newArrayDecoder(dt, st reflect.Type) decoderFunc { - dec := &arrayDecoder{typeDecoder(dt.Elem(), st.Elem())} - return dec.decode -} - -// Map decoder - -type mapAsMapDecoder struct { - keyDec, elemDec decoderFunc -} - -func (d *mapAsMapDecoder) decode(dv, sv reflect.Value) { - dt := dv.Type() - dv.Set(reflect.MakeMap(reflect.MapOf(dt.Key(), dt.Elem()))) - - var mapKey reflect.Value - var mapElem reflect.Value - - keyType := dv.Type().Key() - elemType := dv.Type().Elem() - - for _, sElemKey := range sv.MapKeys() { - var dElemKey reflect.Value - var dElemVal reflect.Value - - if !mapKey.IsValid() { - mapKey = reflect.New(keyType).Elem() - } else { - mapKey.Set(reflect.Zero(keyType)) - } - dElemKey = mapKey - - if !mapElem.IsValid() { - mapElem = reflect.New(elemType).Elem() - } else { - mapElem.Set(reflect.Zero(elemType)) - } - dElemVal = mapElem - - d.keyDec(dElemKey, sElemKey) - d.elemDec(dElemVal, sv.MapIndex(sElemKey)) - - dv.SetMapIndex(dElemKey, dElemVal) - } -} - -func newMapAsMapDecoder(dt, st reflect.Type) decoderFunc { - d := &mapAsMapDecoder{typeDecoder(dt.Key(), st.Key()), typeDecoder(dt.Elem(), st.Elem())} - return d.decode -} - -type mapAsStructDecoder struct { - fields []field - fieldDecs []decoderFunc -} - -func (d *mapAsStructDecoder) decode(dv, sv reflect.Value) { - for _, kv := range sv.MapKeys() { - var f *field - var fieldDec decoderFunc - key := []byte(kv.String()) - for i := range d.fields { - ff := &d.fields[i] - ffd := d.fieldDecs[i] - if bytes.Equal(ff.nameBytes, key) { - f = ff - fieldDec = ffd - break - } - if f == nil && ff.equalFold(ff.nameBytes, key) { - f = ff - fieldDec = ffd - } - } - - if f == nil { - continue - } - - dElemVal := fieldByIndex(dv, f.index) - sElemVal := sv.MapIndex(kv) - - if !sElemVal.IsValid() || !dElemVal.CanSet() { - continue - } - - fieldDec(dElemVal, sElemVal) - } -} - -func newMapAsStructDecoder(dt, st reflect.Type) decoderFunc { - fields := cachedTypeFields(dt) - se := &mapAsStructDecoder{ - fields: fields, - fieldDecs: make([]decoderFunc, len(fields)), - } - for i, f := range fields { - se.fieldDecs[i] = typeDecoder(typeByIndex(dt, f.index), st.Elem()) - } - return se.decode -} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoder.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoder.go deleted file mode 100644 index 3b0d3508d..000000000 --- a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoder.go +++ /dev/null @@ -1,89 +0,0 @@ -// This code is based on encoding/json and gorilla/schema - -package encoding - -import ( - "errors" - "reflect" - "runtime" - "sync" -) - -type encoderFunc func(v reflect.Value) interface{} - -// Encode returns the encoded value of v. -// -// Encode traverses the value v recursively and looks for structs. 
If a struct -// is found then it is checked for tagged fields and converted to -// map[string]interface{}. -func Encode(v interface{}) (ev interface{}, err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - if v, ok := r.(string); ok { - err = errors.New(v) - } else { - err = r.(error) - } - } - }() - - return encode(reflect.ValueOf(v)), nil -} - -func encode(v reflect.Value) interface{} { - return valueEncoder(v)(v) -} - -var encoderCache struct { - sync.RWMutex - m map[reflect.Type]encoderFunc -} - -func valueEncoder(v reflect.Value) encoderFunc { - if !v.IsValid() { - return invalidValueEncoder - } - return typeEncoder(v.Type()) -} - -func typeEncoder(t reflect.Type) encoderFunc { - encoderCache.RLock() - f := encoderCache.m[t] - encoderCache.RUnlock() - if f != nil { - return f - } - - // To deal with recursive types, populate the map with an - // indirect func before we build it. This type waits on the - // real func (f) to be ready and then calls it. This indirect - // func is only used for recursive types. - encoderCache.Lock() - var wg sync.WaitGroup - wg.Add(1) - encoderCache.m[t] = func(v reflect.Value) interface{} { - wg.Wait() - return f(v) - } - encoderCache.Unlock() - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. - f = newTypeEncoder(t, true) - wg.Done() - encoderCache.Lock() - encoderCache.m[t] = f - encoderCache.Unlock() - return f -} - -// IgnoreType causes the encoder to ignore a type when encoding -func IgnoreType(t reflect.Type) { - encoderCache.Lock() - encoderCache.m[t] = doNothingEncoder - encoderCache.Unlock() -} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoder_test.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoder_test.go deleted file mode 100644 index 7b1ee0614..000000000 --- a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoder_test.go +++ /dev/null @@ -1,262 +0,0 @@ -package encoding - -import ( - "image" - "reflect" - "testing" - "time" -) - -var encodeExpected = map[string]interface{}{ - "Level0": int64(1), - "Level1b": int64(2), - "Level1c": int64(3), - "Level1a": int64(5), - "LEVEL1B": int64(6), - "e": map[string]interface{}{ - "Level1a": int64(8), - "Level1b": int64(9), - "Level1c": int64(10), - "Level1d": int64(11), - "x": int64(12), - }, - "Loop1": int64(13), - "Loop2": int64(14), - "X": int64(15), - "Y": int64(16), - "Z": int64(17), -} - -func TestEncode(t *testing.T) { - // Top is defined in decoder_test.go - var in = Top{ - Level0: 1, - Embed0: Embed0{ - Level1b: 2, - Level1c: 3, - }, - Embed0a: &Embed0a{ - Level1a: 5, - Level1b: 6, - }, - Embed0b: &Embed0b{ - Level1a: 8, - Level1b: 9, - Level1c: 10, - Level1d: 11, - Level1e: 12, - }, - Loop: Loop{ - Loop1: 13, - Loop2: 14, - }, - Embed0p: Embed0p{ - Point: image.Point{X: 15, Y: 16}, - }, - Embed0q: Embed0q{ - Point: Point{Z: 17}, - }, - } - - got, err := Encode(&in) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got, encodeExpected) { - t.Errorf(" got: %v\nwant: %v\n", got, encodeExpected) - } -} - -type Optionals struct { - Sr string `gorethink:"sr"` - So string `gorethink:"so,omitempty"` - Sw string `gorethink:"-"` - - Ir int `gorethink:"omitempty"` // actually named omitempty, not an option - Io int `gorethink:"io,omitempty"` - - Tr time.Time `gorethink:"tr"` - To time.Time `gorethink:"to,omitempty"` - - Slr []string `gorethink:"slr"` - Slo []string `gorethink:"slo,omitempty"` - - Mr
map[string]interface{} `gorethink:"mr"` - Mo map[string]interface{} `gorethink:",omitempty"` -} - -var optionalsExpected = map[string]interface{}{ - "sr": "", - "omitempty": int64(0), - "tr": map[string]interface{}{"$reql_type$": "TIME", "epoch_time": 0, "timezone": "+00:00"}, - "slr": []interface{}{}, - "mr": map[string]interface{}{}, -} - -func TestOmitEmpty(t *testing.T) { - var o Optionals - o.Sw = "something" - o.Tr = time.Unix(0, 0) - o.Mr = map[string]interface{}{} - o.Mo = map[string]interface{}{} - - got, err := Encode(&o) - if err != nil { - t.Fatal(err) - } - if !jsonEqual(got, optionalsExpected) { - t.Errorf("\ngot: %#v\nwant: %#v\n", got, optionalsExpected) - } -} - -type IntType int - -type MyStruct struct { - IntType -} - -func TestAnonymousNonstruct(t *testing.T) { - var i IntType = 11 - a := MyStruct{i} - var want = map[string]interface{}{"IntType": int64(11)} - - got, err := Encode(a) - if err != nil { - t.Fatalf("Encode: %v", err) - } - if !reflect.DeepEqual(got, want) { - t.Errorf("got %v, want %v", got, want) - } -} - -func TestEncodePointer(t *testing.T) { - v := Pointer{PPoint: &Point{Z: 1}, Point: Point{Z: 2}} - var want = map[string]interface{}{ - "PPoint": map[string]interface{}{"Z": int64(1)}, - "Point": map[string]interface{}{"Z": int64(2)}, - } - - got, err := Encode(v) - if err != nil { - t.Fatalf("Encode: %v", err) - } - if !reflect.DeepEqual(got, want) { - t.Errorf("got %v, want %v", got, want) - } -} - -func TestEncodeNilPointer(t *testing.T) { - v := Pointer{PPoint: nil, Point: Point{Z: 2}} - var want = map[string]interface{}{ - "PPoint": nil, - "Point": map[string]interface{}{"Z": int64(2)}, - } - - got, err := Encode(v) - if err != nil { - t.Fatalf("Encode: %v", err) - } - if !reflect.DeepEqual(got, want) { - t.Errorf("got %v, want %v", got, want) - } -} - -type BugA struct { - S string -} - -type BugB struct { - BugA - S string -} - -type BugC struct { - S string -} - -// Legal Go: We never use the repeated embedded field (S). -type BugX struct { - A int - BugA - BugB -} - -// Issue 5245. -func TestEmbeddedBug(t *testing.T) { - v := BugB{ - BugA{"A"}, - "B", - } - got, err := Encode(v) - if err != nil { - t.Fatal("Encode:", err) - } - want := map[string]interface{}{"S": "B"} - if !reflect.DeepEqual(got, want) { - t.Fatalf("Encode: got %v want %v", got, want) - } - // Now check that the duplicate field, S, does not appear. - x := BugX{ - A: 23, - } - got, err = Encode(x) - if err != nil { - t.Fatal("Encode:", err) - } - want = map[string]interface{}{"A": int64(23)} - if !reflect.DeepEqual(got, want) { - t.Fatalf("Encode: got %v want %v", got, want) - } -} - -type BugD struct { // Same as BugA after tagging. - XXX string `gorethink:"S"` -} - -// BugD's tagged S field should dominate BugA's. -type BugY struct { - BugA - BugD -} - -// Test that a field with a tag dominates untagged fields. -func TestTaggedFieldDominates(t *testing.T) { - v := BugY{ - BugA{"BugA"}, - BugD{"BugD"}, - } - got, err := Encode(v) - if err != nil { - t.Fatal("Encode:", err) - } - want := map[string]interface{}{"S": "BugD"} - if !reflect.DeepEqual(got, want) { - t.Fatalf("Encode: got %v want %v", got, want) - } -} - -// There are no tags here, so S should not appear. -type BugZ struct { - BugA - BugC - BugY // Contains a tagged S field through BugD; should not dominate. 
-} - -func TestDuplicatedFieldDisappears(t *testing.T) { - v := BugZ{ - BugA{"BugA"}, - BugC{"BugC"}, - BugY{ - BugA{"nested BugA"}, - BugD{"nested BugD"}, - }, - } - got, err := Encode(v) - if err != nil { - t.Fatal("Encode:", err) - } - want := map[string]interface{}{} - if !reflect.DeepEqual(got, want) { - t.Fatalf("Encode: got %v want %v", got, want) - } -} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoder_types.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoder_types.go deleted file mode 100644 index de38a1905..000000000 --- a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoder_types.go +++ /dev/null @@ -1,303 +0,0 @@ -package encoding - -import ( - "encoding/base64" - "math" - "reflect" - "time" -) - -// newTypeEncoder constructs an encoderFunc for a type. -// The returned encoder only checks CanAddr when allowAddr is true. -func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc { - if t.Implements(marshalerType) { - return marshalerEncoder - } - if t.Kind() != reflect.Ptr && allowAddr { - if reflect.PtrTo(t).Implements(marshalerType) { - return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false)) - } - } - - // Check for pseudo-types first - switch t { - case timeType: - return timePseudoTypeEncoder - } - - switch t.Kind() { - case reflect.Bool: - return boolEncoder - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return intEncoder - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return uintEncoder - case reflect.Float32, reflect.Float64: - return floatEncoder - case reflect.String: - return stringEncoder - case reflect.Interface: - return interfaceEncoder - case reflect.Struct: - return newStructEncoder(t) - case reflect.Map: - return newMapEncoder(t) - case reflect.Slice: - return newSliceEncoder(t) - case reflect.Array: - return newArrayEncoder(t) - case reflect.Ptr: - return newPtrEncoder(t) - default: - return unsupportedTypeEncoder - } -} - -func invalidValueEncoder(v reflect.Value) interface{} { - return nil -} - -func doNothingEncoder(v reflect.Value) interface{} { - return v.Interface() -} - -func marshalerEncoder(v reflect.Value) interface{} { - if v.Kind() == reflect.Ptr && v.IsNil() { - return nil - } - m := v.Interface().(Marshaler) - ev, err := m.MarshalRQL() - if err != nil { - panic(&MarshalerError{v.Type(), err}) - } - - return ev -} - -func addrMarshalerEncoder(v reflect.Value) interface{} { - va := v.Addr() - if va.IsNil() { - return nil - } - m := va.Interface().(Marshaler) - ev, err := m.MarshalRQL() - if err != nil { - panic(&MarshalerError{v.Type(), err}) - } - - return ev -} - -func boolEncoder(v reflect.Value) interface{} { - if v.Bool() { - return true - } else { - return false - } -} - -func intEncoder(v reflect.Value) interface{} { - return v.Int() -} - -func uintEncoder(v reflect.Value) interface{} { - return v.Uint() -} - -func floatEncoder(v reflect.Value) interface{} { - return v.Float() -} - -func stringEncoder(v reflect.Value) interface{} { - return v.String() -} - -func interfaceEncoder(v reflect.Value) interface{} { - if v.IsNil() { - return nil - } - return encode(v.Elem()) -} - -func unsupportedTypeEncoder(v reflect.Value) interface{} { - panic(&UnsupportedTypeError{v.Type()}) -} - -type structEncoder struct { - fields []field - fieldEncs []encoderFunc -} - -func (se *structEncoder) encode(v reflect.Value) interface{} { - m := make(map[string]interface{}) - - for
i, f := range se.fields { - fv := fieldByIndex(v, f.index) - if !fv.IsValid() || f.omitEmpty && se.isEmptyValue(fv) { - continue - } - - m[f.name] = se.fieldEncs[i](fv) - } - - return m -} - -func (se *structEncoder) isEmptyValue(v reflect.Value) bool { - if v.Type() == timeType { - return v.Interface().(time.Time) == time.Time{} - } - - return isEmptyValue(v) -} - -func newStructEncoder(t reflect.Type) encoderFunc { - fields := cachedTypeFields(t) - se := &structEncoder{ - fields: fields, - fieldEncs: make([]encoderFunc, len(fields)), - } - for i, f := range fields { - se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index)) - } - return se.encode -} - -type mapEncoder struct { - elemEnc encoderFunc -} - -func (me *mapEncoder) encode(v reflect.Value) interface{} { - if v.IsNil() { - return nil - } - - m := make(map[string]interface{}) - - for _, k := range v.MapKeys() { - m[k.String()] = me.elemEnc(v.MapIndex(k)) - } - - return m -} - -func newMapEncoder(t reflect.Type) encoderFunc { - if t.Key().Kind() != reflect.String { - return unsupportedTypeEncoder - } - me := &mapEncoder{typeEncoder(t.Elem())} - return me.encode -} - -// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil. -type sliceEncoder struct { - arrayEnc encoderFunc -} - -func (se *sliceEncoder) encode(v reflect.Value) interface{} { - if v.IsNil() { - return []interface{}{} - } - return se.arrayEnc(v) -} - -func newSliceEncoder(t reflect.Type) encoderFunc { - // Byte slices get special treatment; arrays don't. - if t.Elem().Kind() == reflect.Uint8 { - return encodeByteSlice - } - enc := &sliceEncoder{newArrayEncoder(t)} - return enc.encode -} - -type arrayEncoder struct { - elemEnc encoderFunc -} - -func (ae *arrayEncoder) encode(v reflect.Value) interface{} { - n := v.Len() - - a := make([]interface{}, n) - for i := 0; i < n; i++ { - a[i] = ae.elemEnc(v.Index(i)) - } - - return a -} - -func newArrayEncoder(t reflect.Type) encoderFunc { - enc := &arrayEncoder{typeEncoder(t.Elem())} - return enc.encode -} - -type ptrEncoder struct { - elemEnc encoderFunc -} - -func (pe *ptrEncoder) encode(v reflect.Value) interface{} { - if v.IsNil() { - return nil - } - return pe.elemEnc(v.Elem()) -} - -func newPtrEncoder(t reflect.Type) encoderFunc { - enc := &ptrEncoder{typeEncoder(t.Elem())} - return enc.encode -} - -type condAddrEncoder struct { - canAddrEnc, elseEnc encoderFunc -} - -func (ce *condAddrEncoder) encode(v reflect.Value) interface{} { - if v.CanAddr() { - return ce.canAddrEnc(v) - } else { - return ce.elseEnc(v) - } -} - -// newCondAddrEncoder returns an encoder that checks whether its value -// CanAddr and delegates to canAddrEnc if so, else to elseEnc. 
-func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc { - enc := &condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc} - return enc.encode -} - -// Pseudo-type encoders - -// Encode a time.Time value to the TIME RQL type -func timePseudoTypeEncoder(v reflect.Value) interface{} { - t := v.Interface().(time.Time) - - timeVal := float64(t.UnixNano()) / float64(time.Second) - - // use seconds-since-epoch precision if time.Time `t` - // is before the oldest nanosecond time - if t.Before(time.Unix(0, math.MinInt64)) { - timeVal = float64(t.Unix()) - } - - return map[string]interface{}{ - "$reql_type$": "TIME", - "epoch_time": timeVal, - "timezone": "+00:00", - } -} - -// Encode a byte slice to the BINARY RQL type -func encodeByteSlice(v reflect.Value) interface{} { - var b []byte - if !v.IsNil() { - b = v.Bytes() - } - - dst := make([]byte, base64.StdEncoding.EncodedLen(len(b))) - base64.StdEncoding.Encode(dst, b) - - return map[string]interface{}{ - "$reql_type$": "BINARY", - "data": string(dst), - } -} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoding.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoding.go deleted file mode 100644 index 0169e1448..000000000 --- a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/encoding.go +++ /dev/null @@ -1,32 +0,0 @@ -package encoding - -import ( - "reflect" - "time" -) - -var ( - // type constants - stringType = reflect.TypeOf("") - timeType = reflect.TypeOf(new(time.Time)).Elem() - - marshalerType = reflect.TypeOf(new(Marshaler)).Elem() - unmarshalerType = reflect.TypeOf(new(Unmarshaler)).Elem() -) - -// Marshaler is the interface implemented by objects that -// can marshal themselves into a valid RQL pseudo-type. -type Marshaler interface { - MarshalRQL() (interface{}, error) -} - -// Unmarshaler is the interface implemented by objects -// that can unmarshal a pseudo-type object into themselves. -type Unmarshaler interface { - UnmarshalRQL(interface{}) error -} - -func init() { - encoderCache.m = make(map[reflect.Type]encoderFunc) - decoderCache.m = make(map[decoderCacheKey]decoderFunc) -} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/errors.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/errors.go deleted file mode 100644 index 8b9ac2c52..000000000 --- a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/errors.go +++ /dev/null @@ -1,102 +0,0 @@ -package encoding - -import ( - "fmt" - "reflect" - "strings" -) - -type MarshalerError struct { - Type reflect.Type - Err error -} - -func (e *MarshalerError) Error() string { - return "gorethink: error calling MarshalRQL for type " + e.Type.String() + ": " + e.Err.Error() -} - -type InvalidUnmarshalError struct { - Type reflect.Type -} - -func (e *InvalidUnmarshalError) Error() string { - if e.Type == nil { - return "gorethink: UnmarshalRQL(nil)" - } - - if e.Type.Kind() != reflect.Ptr { - return "gorethink: UnmarshalRQL(non-pointer " + e.Type.String() + ")" - } - return "gorethink: UnmarshalRQL(nil " + e.Type.String() + ")" -} - -// A DecodeTypeError describes a value that was -// not appropriate for a value of a specific Go type.
-type DecodeTypeError struct { - DestType, SrcType reflect.Type - Reason string -} - -func (e *DecodeTypeError) Error() string { - if e.Reason != "" { - return "gorethink: could not decode type " + e.SrcType.String() + " into Go value of type " + e.DestType.String() + ": " + e.Reason - } else { - return "gorethink: could not decode type " + e.SrcType.String() + " into Go value of type " + e.DestType.String() - - } -} - -// An UnsupportedTypeError is returned by Marshal when attempting -// to encode an unsupported value type. -type UnsupportedTypeError struct { - Type reflect.Type -} - -func (e *UnsupportedTypeError) Error() string { - return "gorethink: unsupported type: " + e.Type.String() -} - -// An UnexpectedTypeError is returned by Marshal when attempting -// to encode an unexpected value type. -type UnexpectedTypeError struct { - DestType, SrcType reflect.Type -} - -func (e *UnexpectedTypeError) Error() string { - return "gorethink: expected type: " + e.DestType.String() + ", got " + e.SrcType.String() -} - -type UnsupportedValueError struct { - Value reflect.Value - Str string -} - -func (e *UnsupportedValueError) Error() string { - return "gorethink: unsupported value: " + e.Str -} - -// Error implements the error interface and can represent multiple -// errors that occur in the course of a single decode. -type Error struct { - Errors []string -} - -func (e *Error) Error() string { - points := make([]string, len(e.Errors)) - for i, err := range e.Errors { - points[i] = fmt.Sprintf("* %s", err) - } - - return fmt.Sprintf( - "%d error(s) decoding:\n\n%s", - len(e.Errors), strings.Join(points, "\n")) -} - -func appendErrors(errors []string, err error) []string { - switch e := err.(type) { - case *Error: - return append(errors, e.Errors...) - default: - return append(errors, e.Error()) - } -} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/fold.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/fold.go deleted file mode 100644 index 21c9e68e4..000000000 --- a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/fold.go +++ /dev/null @@ -1,139 +0,0 @@ -package encoding - -import ( - "bytes" - "unicode/utf8" -) - -const ( - caseMask = ^byte(0x20) // Mask to ignore case in ASCII. - kelvin = '\u212a' - smallLongEss = '\u017f' -) - -// foldFunc returns one of four different case folding equivalence -// functions, from most general (and slow) to fastest: -// -// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 -// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') -// 3) asciiEqualFold, no special, but includes non-letters (including _) -// 4) simpleLetterEqualFold, no specials, no non-letters. -// -// The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'ſ' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign -// See http://play.golang.org/p/tTxjOc0OGo -// -// The returned function is specialized for matching against s and -// should only be given s. It's not curried for performance reasons. -func foldFunc(s []byte) func(s, t []byte) bool { - nonLetter := false - special := false // special letter - for _, b := range s { - if b >= utf8.RuneSelf { - return bytes.EqualFold - } - upper := b & caseMask - if upper < 'A' || upper > 'Z' { - nonLetter = true - } else if upper == 'K' || upper == 'S' { - // See above for why these letters are special.
-// foldFunc returns one of four different case folding equivalence -// functions, from most general (and slow) to fastest: -// -// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 -// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') -// 3) asciiEqualFold, no special, but includes non-letters (including _) -// 4) simpleLetterEqualFold, no specials, no non-letters. -// -// The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'ſ' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign -// See http://play.golang.org/p/tTxjOc0OGo -// -// The returned function is specialized for matching against s and -// should only be given s. It's not curried for performance reasons. -func foldFunc(s []byte) func(s, t []byte) bool { - nonLetter := false - special := false // special letter - for _, b := range s { - if b >= utf8.RuneSelf { - return bytes.EqualFold - } - upper := b & caseMask - if upper < 'A' || upper > 'Z' { - nonLetter = true - } else if upper == 'K' || upper == 'S' { - // See above for why these letters are special. - special = true - } - } - if special { - return equalFoldRight - } - if nonLetter { - return asciiEqualFold - } - return simpleLetterEqualFold -} - -// equalFoldRight is a specialization of bytes.EqualFold when s is -// known to be all ASCII (including punctuation), but contains an 's', -// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. -// See comments on foldFunc. -func equalFoldRight(s, t []byte) bool { - for _, sb := range s { - if len(t) == 0 { - return false - } - tb := t[0] - if tb < utf8.RuneSelf { - if sb != tb { - sbUpper := sb & caseMask - if 'A' <= sbUpper && sbUpper <= 'Z' { - if sbUpper != tb&caseMask { - return false - } - } else { - return false - } - } - t = t[1:] - continue - } - // sb is ASCII and t is not. t must be either kelvin - // sign or long s; sb must be s, S, k, or K. - tr, size := utf8.DecodeRune(t) - switch sb { - case 's', 'S': - if tr != smallLongEss { - return false - } - case 'k', 'K': - if tr != kelvin { - return false - } - default: - return false - } - t = t[size:] - - } - if len(t) > 0 { - return false - } - return true -} - -// asciiEqualFold is a specialization of bytes.EqualFold for use when -// s is all ASCII (but may contain non-letters) and contains no -// special-folding letters. -// See comments on foldFunc. -func asciiEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, sb := range s { - tb := t[i] - if sb == tb { - continue - } - if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { - if sb&caseMask != tb&caseMask { - return false - } - } else { - return false - } - } - return true -} - -// simpleLetterEqualFold is a specialization of bytes.EqualFold for -// use when s is all ASCII letters (no underscores, etc) and also -// doesn't contain 'k', 'K', 's', or 'S'. -// See comments on foldFunc. -func simpleLetterEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, b := range s { - if b&caseMask != t[i]&caseMask { - return false - } - } - return true -} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/tags.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/tags.go deleted file mode 100644 index cea3edaf1..000000000 --- a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/tags.go +++ /dev/null @@ -1,69 +0,0 @@ -// This code is based on encoding/json and gorilla/schema - -package encoding - -import ( - "reflect" - "strings" - "unicode" -) - -const TagName = "gorethink" - -// tagOptions is the string following a comma in a struct field's -// tag, or the empty string. It does not include the leading comma. -type tagOptions string - -func getTag(sf reflect.StructField) string { - return sf.Tag.Get(TagName) -} - -// parseTag splits a struct field's tag into its name and -// comma-separated options. -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} -
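Editor's note: to make the tag grammar handled by getTag/parseTag above (and Contains below) concrete, here is a self-contained sketch. The Metric struct and its tag are hypothetical caller-side code; the splitting logic mirrors parseTag rather than calling the internal package.

package main

import (
	"fmt"
	"reflect"
	"strings"
)

type Metric struct {
	Name string `gorethink:"name,omitempty"`
}

func main() {
	sf, _ := reflect.TypeOf(Metric{}).FieldByName("Name")
	tag := sf.Tag.Get("gorethink") // what getTag returns: "name,omitempty"

	// parseTag: split the field name from the comma-separated options.
	name, opts := tag, ""
	if i := strings.Index(tag, ","); i != -1 {
		name, opts = tag[:i], tag[i+1:]
	}
	fmt.Println(name) // name
	// tagOptions.Contains walks the remaining comma-separated flags:
	fmt.Println(strings.Contains(","+opts+",", ",omitempty,")) // true
}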
-// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. -func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/utils.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/utils.go deleted file mode 100644 index 0ca2c7734..000000000 --- a/Godeps/_workspace/src/github.com/dancannon/gorethink/encoding/utils.go +++ /dev/null @@ -1,72 +0,0 @@ -package encoding - -import "reflect" - -func getTypeKind(t reflect.Type) reflect.Kind { - kind := t.Kind() - - switch { - case kind >= reflect.Int && kind <= reflect.Int64: - return reflect.Int - case kind >= reflect.Uint && kind <= reflect.Uint64: - return reflect.Uint - case kind >= reflect.Float32 && kind <= reflect.Float64: - return reflect.Float32 - default: - return kind - } -} - -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func fieldByIndex(v reflect.Value, index []int) reflect.Value { - for _, i := range index { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - v = v.Field(i) - } - - return v -} - -func typeByIndex(t reflect.Type, index []int) reflect.Type { - for _, i := range index { - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - t = t.Field(i).Type - } - return t -} - -// valueByString sorts reflect.Value by the string value; this is useful for -// sorting the result of MapKeys -type valueByString []reflect.Value - -func (x valueByString) Len() int { return len(x) } - -func (x valueByString) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x valueByString) Less(i, j int) bool { - return x[i].String() < x[j].String() -} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/ql2/ql2.pb.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/ql2/ql2.pb.go deleted file mode 100644 index 54ac15198..000000000 --- a/Godeps/_workspace/src/github.com/dancannon/gorethink/ql2/ql2.pb.go +++ /dev/null @@ -1,1243 +0,0 @@ -// Code generated by protoc-gen-go. -// source: ql2.proto -// DO NOT EDIT! - -package ql2 - -import proto "github.com/golang/protobuf/proto" -import json "encoding/json" -import math "math" - -// Reference proto, json, and math imports to suppress error if they are not otherwise used.
-var _ = proto.Marshal -var _ = &json.SyntaxError{} -var _ = math.Inf - -type VersionDummy_Version int32 - -const ( - VersionDummy_V0_1 VersionDummy_Version = 1063369270 - VersionDummy_V0_2 VersionDummy_Version = 1915781601 - VersionDummy_V0_3 VersionDummy_Version = 1601562686 - VersionDummy_V0_4 VersionDummy_Version = 1074539808 -) - -var VersionDummy_Version_name = map[int32]string{ - 1063369270: "V0_1", - 1915781601: "V0_2", - 1601562686: "V0_3", - 1074539808: "V0_4", -} -var VersionDummy_Version_value = map[string]int32{ - "V0_1": 1063369270, - "V0_2": 1915781601, - "V0_3": 1601562686, - "V0_4": 1074539808, -} - -func (x VersionDummy_Version) Enum() *VersionDummy_Version { - p := new(VersionDummy_Version) - *p = x - return p -} -func (x VersionDummy_Version) String() string { - return proto.EnumName(VersionDummy_Version_name, int32(x)) -} -func (x VersionDummy_Version) MarshalJSON() ([]byte, error) { - return json.Marshal(x.String()) -} -func (x *VersionDummy_Version) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(VersionDummy_Version_value, data, "VersionDummy_Version") - if err != nil { - return err - } - *x = VersionDummy_Version(value) - return nil -} - -type VersionDummy_Protocol int32 - -const ( - VersionDummy_PROTOBUF VersionDummy_Protocol = 656407617 - VersionDummy_JSON VersionDummy_Protocol = 2120839367 -) - -var VersionDummy_Protocol_name = map[int32]string{ - 656407617: "PROTOBUF", - 2120839367: "JSON", -} -var VersionDummy_Protocol_value = map[string]int32{ - "PROTOBUF": 656407617, - "JSON": 2120839367, -} - -func (x VersionDummy_Protocol) Enum() *VersionDummy_Protocol { - p := new(VersionDummy_Protocol) - *p = x - return p -} -func (x VersionDummy_Protocol) String() string { - return proto.EnumName(VersionDummy_Protocol_name, int32(x)) -} -func (x VersionDummy_Protocol) MarshalJSON() ([]byte, error) { - return json.Marshal(x.String()) -} -func (x *VersionDummy_Protocol) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(VersionDummy_Protocol_value, data, "VersionDummy_Protocol") - if err != nil { - return err - } - *x = VersionDummy_Protocol(value) - return nil -} - -type Query_QueryType int32 - -const ( - Query_START Query_QueryType = 1 - Query_CONTINUE Query_QueryType = 2 - Query_STOP Query_QueryType = 3 - Query_NOREPLY_WAIT Query_QueryType = 4 -) - -var Query_QueryType_name = map[int32]string{ - 1: "START", - 2: "CONTINUE", - 3: "STOP", - 4: "NOREPLY_WAIT", -} -var Query_QueryType_value = map[string]int32{ - "START": 1, - "CONTINUE": 2, - "STOP": 3, - "NOREPLY_WAIT": 4, -} - -func (x Query_QueryType) Enum() *Query_QueryType { - p := new(Query_QueryType) - *p = x - return p -} -func (x Query_QueryType) String() string { - return proto.EnumName(Query_QueryType_name, int32(x)) -} -func (x Query_QueryType) MarshalJSON() ([]byte, error) { - return json.Marshal(x.String()) -} -func (x *Query_QueryType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_QueryType_value, data, "Query_QueryType") - if err != nil { - return err - } - *x = Query_QueryType(value) - return nil -} - -type Frame_FrameType int32 - -const ( - Frame_POS Frame_FrameType = 1 - Frame_OPT Frame_FrameType = 2 -) - -var Frame_FrameType_name = map[int32]string{ - 1: "POS", - 2: "OPT", -} -var Frame_FrameType_value = map[string]int32{ - "POS": 1, - "OPT": 2, -} - -func (x Frame_FrameType) Enum() *Frame_FrameType { - p := new(Frame_FrameType) - *p = x - return p -} -func (x Frame_FrameType) String() string { - return 
proto.EnumName(Frame_FrameType_name, int32(x)) -} -func (x Frame_FrameType) MarshalJSON() ([]byte, error) { - return json.Marshal(x.String()) -} -func (x *Frame_FrameType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Frame_FrameType_value, data, "Frame_FrameType") - if err != nil { - return err - } - *x = Frame_FrameType(value) - return nil -} - -type Response_ResponseType int32 - -const ( - Response_SUCCESS_ATOM Response_ResponseType = 1 - Response_SUCCESS_SEQUENCE Response_ResponseType = 2 - Response_SUCCESS_PARTIAL Response_ResponseType = 3 - Response_WAIT_COMPLETE Response_ResponseType = 4 - Response_CLIENT_ERROR Response_ResponseType = 16 - Response_COMPILE_ERROR Response_ResponseType = 17 - Response_RUNTIME_ERROR Response_ResponseType = 18 -) - -var Response_ResponseType_name = map[int32]string{ - 1: "SUCCESS_ATOM", - 2: "SUCCESS_SEQUENCE", - 3: "SUCCESS_PARTIAL", - 4: "WAIT_COMPLETE", - 16: "CLIENT_ERROR", - 17: "COMPILE_ERROR", - 18: "RUNTIME_ERROR", -} -var Response_ResponseType_value = map[string]int32{ - "SUCCESS_ATOM": 1, - "SUCCESS_SEQUENCE": 2, - "SUCCESS_PARTIAL": 3, - "WAIT_COMPLETE": 4, - "CLIENT_ERROR": 16, - "COMPILE_ERROR": 17, - "RUNTIME_ERROR": 18, -} - -func (x Response_ResponseType) Enum() *Response_ResponseType { - p := new(Response_ResponseType) - *p = x - return p -} -func (x Response_ResponseType) String() string { - return proto.EnumName(Response_ResponseType_name, int32(x)) -} -func (x Response_ResponseType) MarshalJSON() ([]byte, error) { - return json.Marshal(x.String()) -} -func (x *Response_ResponseType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Response_ResponseType_value, data, "Response_ResponseType") - if err != nil { - return err - } - *x = Response_ResponseType(value) - return nil -} - -type Response_ResponseNote int32 - -const ( - Response_SEQUENCE_FEED Response_ResponseNote = 1 - Response_ATOM_FEED Response_ResponseNote = 2 - Response_ORDER_BY_LIMIT_FEED Response_ResponseNote = 3 - Response_UNIONED_FEED Response_ResponseNote = 4 - Response_INCLUDES_STATES Response_ResponseNote = 5 -) - -var Response_ResponseNote_name = map[int32]string{ - 1: "SEQUENCE_FEED", - 2: "ATOM_FEED", - 3: "ORDER_BY_LIMIT_FEED", - 4: "UNIONED_FEED", - 5: "INCLUDES_STATES", -} -var Response_ResponseNote_value = map[string]int32{ - "SEQUENCE_FEED": 1, - "ATOM_FEED": 2, - "ORDER_BY_LIMIT_FEED": 3, - "UNIONED_FEED": 4, - "INCLUDES_STATES": 5, -} - -func (x Response_ResponseNote) Enum() *Response_ResponseNote { - p := new(Response_ResponseNote) - *p = x - return p -} -func (x Response_ResponseNote) String() string { - return proto.EnumName(Response_ResponseNote_name, int32(x)) -} -func (x Response_ResponseNote) MarshalJSON() ([]byte, error) { - return json.Marshal(x.String()) -} -func (x *Response_ResponseNote) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Response_ResponseNote_value, data, "Response_ResponseNote") - if err != nil { - return err - } - *x = Response_ResponseNote(value) - return nil -} - -type Datum_DatumType int32 - -const ( - Datum_R_NULL Datum_DatumType = 1 - Datum_R_BOOL Datum_DatumType = 2 - Datum_R_NUM Datum_DatumType = 3 - Datum_R_STR Datum_DatumType = 4 - Datum_R_ARRAY Datum_DatumType = 5 - Datum_R_OBJECT Datum_DatumType = 6 - Datum_R_JSON Datum_DatumType = 7 -) - -var Datum_DatumType_name = map[int32]string{ - 1: "R_NULL", - 2: "R_BOOL", - 3: "R_NUM", - 4: "R_STR", - 5: "R_ARRAY", - 6: "R_OBJECT", - 7: "R_JSON", -} -var Datum_DatumType_value = map[string]int32{ - 
"R_NULL": 1, - "R_BOOL": 2, - "R_NUM": 3, - "R_STR": 4, - "R_ARRAY": 5, - "R_OBJECT": 6, - "R_JSON": 7, -} - -func (x Datum_DatumType) Enum() *Datum_DatumType { - p := new(Datum_DatumType) - *p = x - return p -} -func (x Datum_DatumType) String() string { - return proto.EnumName(Datum_DatumType_name, int32(x)) -} -func (x Datum_DatumType) MarshalJSON() ([]byte, error) { - return json.Marshal(x.String()) -} -func (x *Datum_DatumType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Datum_DatumType_value, data, "Datum_DatumType") - if err != nil { - return err - } - *x = Datum_DatumType(value) - return nil -} - -type Term_TermType int32 - -const ( - Term_DATUM Term_TermType = 1 - Term_MAKE_ARRAY Term_TermType = 2 - Term_MAKE_OBJ Term_TermType = 3 - Term_VAR Term_TermType = 10 - Term_JAVASCRIPT Term_TermType = 11 - Term_UUID Term_TermType = 169 - Term_HTTP Term_TermType = 153 - Term_ERROR Term_TermType = 12 - Term_IMPLICIT_VAR Term_TermType = 13 - Term_DB Term_TermType = 14 - Term_TABLE Term_TermType = 15 - Term_GET Term_TermType = 16 - Term_GET_ALL Term_TermType = 78 - Term_EQ Term_TermType = 17 - Term_NE Term_TermType = 18 - Term_LT Term_TermType = 19 - Term_LE Term_TermType = 20 - Term_GT Term_TermType = 21 - Term_GE Term_TermType = 22 - Term_NOT Term_TermType = 23 - Term_ADD Term_TermType = 24 - Term_SUB Term_TermType = 25 - Term_MUL Term_TermType = 26 - Term_DIV Term_TermType = 27 - Term_MOD Term_TermType = 28 - Term_APPEND Term_TermType = 29 - Term_PREPEND Term_TermType = 80 - Term_DIFFERENCE Term_TermType = 95 - Term_SET_INSERT Term_TermType = 88 - Term_SET_INTERSECTION Term_TermType = 89 - Term_SET_UNION Term_TermType = 90 - Term_SET_DIFFERENCE Term_TermType = 91 - Term_SLICE Term_TermType = 30 - Term_SKIP Term_TermType = 70 - Term_LIMIT Term_TermType = 71 - Term_OFFSETS_OF Term_TermType = 87 - Term_CONTAINS Term_TermType = 93 - Term_GET_FIELD Term_TermType = 31 - Term_KEYS Term_TermType = 94 - Term_OBJECT Term_TermType = 143 - Term_HAS_FIELDS Term_TermType = 32 - Term_WITH_FIELDS Term_TermType = 96 - Term_PLUCK Term_TermType = 33 - Term_WITHOUT Term_TermType = 34 - Term_MERGE Term_TermType = 35 - Term_BETWEEN_DEPRECATED Term_TermType = 36 - Term_BETWEEN Term_TermType = 182 - Term_REDUCE Term_TermType = 37 - Term_MAP Term_TermType = 38 - Term_FILTER Term_TermType = 39 - Term_CONCAT_MAP Term_TermType = 40 - Term_ORDER_BY Term_TermType = 41 - Term_DISTINCT Term_TermType = 42 - Term_COUNT Term_TermType = 43 - Term_IS_EMPTY Term_TermType = 86 - Term_UNION Term_TermType = 44 - Term_NTH Term_TermType = 45 - Term_BRACKET Term_TermType = 170 - Term_INNER_JOIN Term_TermType = 48 - Term_OUTER_JOIN Term_TermType = 49 - Term_EQ_JOIN Term_TermType = 50 - Term_ZIP Term_TermType = 72 - Term_RANGE Term_TermType = 173 - Term_INSERT_AT Term_TermType = 82 - Term_DELETE_AT Term_TermType = 83 - Term_CHANGE_AT Term_TermType = 84 - Term_SPLICE_AT Term_TermType = 85 - Term_COERCE_TO Term_TermType = 51 - Term_TYPE_OF Term_TermType = 52 - Term_UPDATE Term_TermType = 53 - Term_DELETE Term_TermType = 54 - Term_REPLACE Term_TermType = 55 - Term_INSERT Term_TermType = 56 - Term_DB_CREATE Term_TermType = 57 - Term_DB_DROP Term_TermType = 58 - Term_DB_LIST Term_TermType = 59 - Term_TABLE_CREATE Term_TermType = 60 - Term_TABLE_DROP Term_TermType = 61 - Term_TABLE_LIST Term_TermType = 62 - Term_CONFIG Term_TermType = 174 - Term_STATUS Term_TermType = 175 - Term_WAIT Term_TermType = 177 - Term_RECONFIGURE Term_TermType = 176 - Term_REBALANCE Term_TermType = 179 - Term_SYNC Term_TermType = 138 
- Term_INDEX_CREATE Term_TermType = 75 - Term_INDEX_DROP Term_TermType = 76 - Term_INDEX_LIST Term_TermType = 77 - Term_INDEX_STATUS Term_TermType = 139 - Term_INDEX_WAIT Term_TermType = 140 - Term_INDEX_RENAME Term_TermType = 156 - Term_FUNCALL Term_TermType = 64 - Term_BRANCH Term_TermType = 65 - Term_OR Term_TermType = 66 - Term_AND Term_TermType = 67 - Term_FOR_EACH Term_TermType = 68 - Term_FUNC Term_TermType = 69 - Term_ASC Term_TermType = 73 - Term_DESC Term_TermType = 74 - Term_INFO Term_TermType = 79 - Term_MATCH Term_TermType = 97 - Term_UPCASE Term_TermType = 141 - Term_DOWNCASE Term_TermType = 142 - Term_SAMPLE Term_TermType = 81 - Term_DEFAULT Term_TermType = 92 - Term_JSON Term_TermType = 98 - Term_TO_JSON_STRING Term_TermType = 172 - Term_ISO8601 Term_TermType = 99 - Term_TO_ISO8601 Term_TermType = 100 - Term_EPOCH_TIME Term_TermType = 101 - Term_TO_EPOCH_TIME Term_TermType = 102 - Term_NOW Term_TermType = 103 - Term_IN_TIMEZONE Term_TermType = 104 - Term_DURING Term_TermType = 105 - Term_DATE Term_TermType = 106 - Term_TIME_OF_DAY Term_TermType = 126 - Term_TIMEZONE Term_TermType = 127 - Term_YEAR Term_TermType = 128 - Term_MONTH Term_TermType = 129 - Term_DAY Term_TermType = 130 - Term_DAY_OF_WEEK Term_TermType = 131 - Term_DAY_OF_YEAR Term_TermType = 132 - Term_HOURS Term_TermType = 133 - Term_MINUTES Term_TermType = 134 - Term_SECONDS Term_TermType = 135 - Term_TIME Term_TermType = 136 - Term_MONDAY Term_TermType = 107 - Term_TUESDAY Term_TermType = 108 - Term_WEDNESDAY Term_TermType = 109 - Term_THURSDAY Term_TermType = 110 - Term_FRIDAY Term_TermType = 111 - Term_SATURDAY Term_TermType = 112 - Term_SUNDAY Term_TermType = 113 - Term_JANUARY Term_TermType = 114 - Term_FEBRUARY Term_TermType = 115 - Term_MARCH Term_TermType = 116 - Term_APRIL Term_TermType = 117 - Term_MAY Term_TermType = 118 - Term_JUNE Term_TermType = 119 - Term_JULY Term_TermType = 120 - Term_AUGUST Term_TermType = 121 - Term_SEPTEMBER Term_TermType = 122 - Term_OCTOBER Term_TermType = 123 - Term_NOVEMBER Term_TermType = 124 - Term_DECEMBER Term_TermType = 125 - Term_LITERAL Term_TermType = 137 - Term_GROUP Term_TermType = 144 - Term_SUM Term_TermType = 145 - Term_AVG Term_TermType = 146 - Term_MIN Term_TermType = 147 - Term_MAX Term_TermType = 148 - Term_SPLIT Term_TermType = 149 - Term_UNGROUP Term_TermType = 150 - Term_RANDOM Term_TermType = 151 - Term_CHANGES Term_TermType = 152 - Term_ARGS Term_TermType = 154 - Term_BINARY Term_TermType = 155 - Term_GEOJSON Term_TermType = 157 - Term_TO_GEOJSON Term_TermType = 158 - Term_POINT Term_TermType = 159 - Term_LINE Term_TermType = 160 - Term_POLYGON Term_TermType = 161 - Term_DISTANCE Term_TermType = 162 - Term_INTERSECTS Term_TermType = 163 - Term_INCLUDES Term_TermType = 164 - Term_CIRCLE Term_TermType = 165 - Term_GET_INTERSECTING Term_TermType = 166 - Term_FILL Term_TermType = 167 - Term_GET_NEAREST Term_TermType = 168 - Term_POLYGON_SUB Term_TermType = 171 - Term_MINVAL Term_TermType = 180 - Term_MAXVAL Term_TermType = 181 -) - -var Term_TermType_name = map[int32]string{ - 1: "DATUM", - 2: "MAKE_ARRAY", - 3: "MAKE_OBJ", - 10: "VAR", - 11: "JAVASCRIPT", - 169: "UUID", - 153: "HTTP", - 12: "ERROR", - 13: "IMPLICIT_VAR", - 14: "DB", - 15: "TABLE", - 16: "GET", - 78: "GET_ALL", - 17: "EQ", - 18: "NE", - 19: "LT", - 20: "LE", - 21: "GT", - 22: "GE", - 23: "NOT", - 24: "ADD", - 25: "SUB", - 26: "MUL", - 27: "DIV", - 28: "MOD", - 29: "APPEND", - 80: "PREPEND", - 95: "DIFFERENCE", - 88: "SET_INSERT", - 89: "SET_INTERSECTION", - 90: "SET_UNION", - 91: 
"SET_DIFFERENCE", - 30: "SLICE", - 70: "SKIP", - 71: "LIMIT", - 87: "OFFSETS_OF", - 93: "CONTAINS", - 31: "GET_FIELD", - 94: "KEYS", - 143: "OBJECT", - 32: "HAS_FIELDS", - 96: "WITH_FIELDS", - 33: "PLUCK", - 34: "WITHOUT", - 35: "MERGE", - 36: "BETWEEN_DEPRECATED", - 182: "BETWEEN", - 37: "REDUCE", - 38: "MAP", - 39: "FILTER", - 40: "CONCAT_MAP", - 41: "ORDER_BY", - 42: "DISTINCT", - 43: "COUNT", - 86: "IS_EMPTY", - 44: "UNION", - 45: "NTH", - 170: "BRACKET", - 48: "INNER_JOIN", - 49: "OUTER_JOIN", - 50: "EQ_JOIN", - 72: "ZIP", - 173: "RANGE", - 82: "INSERT_AT", - 83: "DELETE_AT", - 84: "CHANGE_AT", - 85: "SPLICE_AT", - 51: "COERCE_TO", - 52: "TYPE_OF", - 53: "UPDATE", - 54: "DELETE", - 55: "REPLACE", - 56: "INSERT", - 57: "DB_CREATE", - 58: "DB_DROP", - 59: "DB_LIST", - 60: "TABLE_CREATE", - 61: "TABLE_DROP", - 62: "TABLE_LIST", - 174: "CONFIG", - 175: "STATUS", - 177: "WAIT", - 176: "RECONFIGURE", - 179: "REBALANCE", - 138: "SYNC", - 75: "INDEX_CREATE", - 76: "INDEX_DROP", - 77: "INDEX_LIST", - 139: "INDEX_STATUS", - 140: "INDEX_WAIT", - 156: "INDEX_RENAME", - 64: "FUNCALL", - 65: "BRANCH", - 66: "OR", - 67: "AND", - 68: "FOR_EACH", - 69: "FUNC", - 73: "ASC", - 74: "DESC", - 79: "INFO", - 97: "MATCH", - 141: "UPCASE", - 142: "DOWNCASE", - 81: "SAMPLE", - 92: "DEFAULT", - 98: "JSON", - 172: "TO_JSON_STRING", - 99: "ISO8601", - 100: "TO_ISO8601", - 101: "EPOCH_TIME", - 102: "TO_EPOCH_TIME", - 103: "NOW", - 104: "IN_TIMEZONE", - 105: "DURING", - 106: "DATE", - 126: "TIME_OF_DAY", - 127: "TIMEZONE", - 128: "YEAR", - 129: "MONTH", - 130: "DAY", - 131: "DAY_OF_WEEK", - 132: "DAY_OF_YEAR", - 133: "HOURS", - 134: "MINUTES", - 135: "SECONDS", - 136: "TIME", - 107: "MONDAY", - 108: "TUESDAY", - 109: "WEDNESDAY", - 110: "THURSDAY", - 111: "FRIDAY", - 112: "SATURDAY", - 113: "SUNDAY", - 114: "JANUARY", - 115: "FEBRUARY", - 116: "MARCH", - 117: "APRIL", - 118: "MAY", - 119: "JUNE", - 120: "JULY", - 121: "AUGUST", - 122: "SEPTEMBER", - 123: "OCTOBER", - 124: "NOVEMBER", - 125: "DECEMBER", - 137: "LITERAL", - 144: "GROUP", - 145: "SUM", - 146: "AVG", - 147: "MIN", - 148: "MAX", - 149: "SPLIT", - 150: "UNGROUP", - 151: "RANDOM", - 152: "CHANGES", - 154: "ARGS", - 155: "BINARY", - 157: "GEOJSON", - 158: "TO_GEOJSON", - 159: "POINT", - 160: "LINE", - 161: "POLYGON", - 162: "DISTANCE", - 163: "INTERSECTS", - 164: "INCLUDES", - 165: "CIRCLE", - 166: "GET_INTERSECTING", - 167: "FILL", - 168: "GET_NEAREST", - 171: "POLYGON_SUB", - 180: "MINVAL", - 181: "MAXVAL", -} -var Term_TermType_value = map[string]int32{ - "DATUM": 1, - "MAKE_ARRAY": 2, - "MAKE_OBJ": 3, - "VAR": 10, - "JAVASCRIPT": 11, - "UUID": 169, - "HTTP": 153, - "ERROR": 12, - "IMPLICIT_VAR": 13, - "DB": 14, - "TABLE": 15, - "GET": 16, - "GET_ALL": 78, - "EQ": 17, - "NE": 18, - "LT": 19, - "LE": 20, - "GT": 21, - "GE": 22, - "NOT": 23, - "ADD": 24, - "SUB": 25, - "MUL": 26, - "DIV": 27, - "MOD": 28, - "APPEND": 29, - "PREPEND": 80, - "DIFFERENCE": 95, - "SET_INSERT": 88, - "SET_INTERSECTION": 89, - "SET_UNION": 90, - "SET_DIFFERENCE": 91, - "SLICE": 30, - "SKIP": 70, - "LIMIT": 71, - "OFFSETS_OF": 87, - "CONTAINS": 93, - "GET_FIELD": 31, - "KEYS": 94, - "OBJECT": 143, - "HAS_FIELDS": 32, - "WITH_FIELDS": 96, - "PLUCK": 33, - "WITHOUT": 34, - "MERGE": 35, - "BETWEEN_DEPRECATED": 36, - "BETWEEN": 182, - "REDUCE": 37, - "MAP": 38, - "FILTER": 39, - "CONCAT_MAP": 40, - "ORDER_BY": 41, - "DISTINCT": 42, - "COUNT": 43, - "IS_EMPTY": 86, - "UNION": 44, - "NTH": 45, - "BRACKET": 170, - "INNER_JOIN": 48, - "OUTER_JOIN": 49, - "EQ_JOIN": 50, - "ZIP": 72, - 
"RANGE": 173, - "INSERT_AT": 82, - "DELETE_AT": 83, - "CHANGE_AT": 84, - "SPLICE_AT": 85, - "COERCE_TO": 51, - "TYPE_OF": 52, - "UPDATE": 53, - "DELETE": 54, - "REPLACE": 55, - "INSERT": 56, - "DB_CREATE": 57, - "DB_DROP": 58, - "DB_LIST": 59, - "TABLE_CREATE": 60, - "TABLE_DROP": 61, - "TABLE_LIST": 62, - "CONFIG": 174, - "STATUS": 175, - "WAIT": 177, - "RECONFIGURE": 176, - "REBALANCE": 179, - "SYNC": 138, - "INDEX_CREATE": 75, - "INDEX_DROP": 76, - "INDEX_LIST": 77, - "INDEX_STATUS": 139, - "INDEX_WAIT": 140, - "INDEX_RENAME": 156, - "FUNCALL": 64, - "BRANCH": 65, - "OR": 66, - "AND": 67, - "FOR_EACH": 68, - "FUNC": 69, - "ASC": 73, - "DESC": 74, - "INFO": 79, - "MATCH": 97, - "UPCASE": 141, - "DOWNCASE": 142, - "SAMPLE": 81, - "DEFAULT": 92, - "JSON": 98, - "TO_JSON_STRING": 172, - "ISO8601": 99, - "TO_ISO8601": 100, - "EPOCH_TIME": 101, - "TO_EPOCH_TIME": 102, - "NOW": 103, - "IN_TIMEZONE": 104, - "DURING": 105, - "DATE": 106, - "TIME_OF_DAY": 126, - "TIMEZONE": 127, - "YEAR": 128, - "MONTH": 129, - "DAY": 130, - "DAY_OF_WEEK": 131, - "DAY_OF_YEAR": 132, - "HOURS": 133, - "MINUTES": 134, - "SECONDS": 135, - "TIME": 136, - "MONDAY": 107, - "TUESDAY": 108, - "WEDNESDAY": 109, - "THURSDAY": 110, - "FRIDAY": 111, - "SATURDAY": 112, - "SUNDAY": 113, - "JANUARY": 114, - "FEBRUARY": 115, - "MARCH": 116, - "APRIL": 117, - "MAY": 118, - "JUNE": 119, - "JULY": 120, - "AUGUST": 121, - "SEPTEMBER": 122, - "OCTOBER": 123, - "NOVEMBER": 124, - "DECEMBER": 125, - "LITERAL": 137, - "GROUP": 144, - "SUM": 145, - "AVG": 146, - "MIN": 147, - "MAX": 148, - "SPLIT": 149, - "UNGROUP": 150, - "RANDOM": 151, - "CHANGES": 152, - "ARGS": 154, - "BINARY": 155, - "GEOJSON": 157, - "TO_GEOJSON": 158, - "POINT": 159, - "LINE": 160, - "POLYGON": 161, - "DISTANCE": 162, - "INTERSECTS": 163, - "INCLUDES": 164, - "CIRCLE": 165, - "GET_INTERSECTING": 166, - "FILL": 167, - "GET_NEAREST": 168, - "POLYGON_SUB": 171, - "MINVAL": 180, - "MAXVAL": 181, -} - -func (x Term_TermType) Enum() *Term_TermType { - p := new(Term_TermType) - *p = x - return p -} -func (x Term_TermType) String() string { - return proto.EnumName(Term_TermType_name, int32(x)) -} -func (x Term_TermType) MarshalJSON() ([]byte, error) { - return json.Marshal(x.String()) -} -func (x *Term_TermType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Term_TermType_value, data, "Term_TermType") - if err != nil { - return err - } - *x = Term_TermType(value) - return nil -} - -type VersionDummy struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *VersionDummy) Reset() { *m = VersionDummy{} } -func (m *VersionDummy) String() string { return proto.CompactTextString(m) } -func (*VersionDummy) ProtoMessage() {} - -type Query struct { - Type *Query_QueryType `protobuf:"varint,1,opt,name=type,enum=Query_QueryType" json:"type,omitempty"` - Query *Term `protobuf:"bytes,2,opt,name=query" json:"query,omitempty"` - Token *int64 `protobuf:"varint,3,opt,name=token" json:"token,omitempty"` - OBSOLETENoreply *bool `protobuf:"varint,4,opt,name=OBSOLETE_noreply,def=0" json:"OBSOLETE_noreply,omitempty"` - AcceptsRJson *bool `protobuf:"varint,5,opt,name=accepts_r_json,def=0" json:"accepts_r_json,omitempty"` - GlobalOptargs []*Query_AssocPair `protobuf:"bytes,6,rep,name=global_optargs" json:"global_optargs,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Query) Reset() { *m = Query{} } -func (m *Query) String() string { return proto.CompactTextString(m) } -func (*Query) ProtoMessage() {} - -const Default_Query_OBSOLETENoreply bool = 
false -const Default_Query_AcceptsRJson bool = false - -func (m *Query) GetType() Query_QueryType { - if m != nil && m.Type != nil { - return *m.Type - } - return 0 -} - -func (m *Query) GetQuery() *Term { - if m != nil { - return m.Query - } - return nil -} - -func (m *Query) GetToken() int64 { - if m != nil && m.Token != nil { - return *m.Token - } - return 0 -} - -func (m *Query) GetOBSOLETENoreply() bool { - if m != nil && m.OBSOLETENoreply != nil { - return *m.OBSOLETENoreply - } - return Default_Query_OBSOLETENoreply -} - -func (m *Query) GetAcceptsRJson() bool { - if m != nil && m.AcceptsRJson != nil { - return *m.AcceptsRJson - } - return Default_Query_AcceptsRJson -} - -func (m *Query) GetGlobalOptargs() []*Query_AssocPair { - if m != nil { - return m.GlobalOptargs - } - return nil -} - -type Query_AssocPair struct { - Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` - Val *Term `protobuf:"bytes,2,opt,name=val" json:"val,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Query_AssocPair) Reset() { *m = Query_AssocPair{} } -func (m *Query_AssocPair) String() string { return proto.CompactTextString(m) } -func (*Query_AssocPair) ProtoMessage() {} - -func (m *Query_AssocPair) GetKey() string { - if m != nil && m.Key != nil { - return *m.Key - } - return "" -} - -func (m *Query_AssocPair) GetVal() *Term { - if m != nil { - return m.Val - } - return nil -} - -type Frame struct { - Type *Frame_FrameType `protobuf:"varint,1,opt,name=type,enum=Frame_FrameType" json:"type,omitempty"` - Pos *int64 `protobuf:"varint,2,opt,name=pos" json:"pos,omitempty"` - Opt *string `protobuf:"bytes,3,opt,name=opt" json:"opt,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Frame) Reset() { *m = Frame{} } -func (m *Frame) String() string { return proto.CompactTextString(m) } -func (*Frame) ProtoMessage() {} - -func (m *Frame) GetType() Frame_FrameType { - if m != nil && m.Type != nil { - return *m.Type - } - return 0 -} - -func (m *Frame) GetPos() int64 { - if m != nil && m.Pos != nil { - return *m.Pos - } - return 0 -} - -func (m *Frame) GetOpt() string { - if m != nil && m.Opt != nil { - return *m.Opt - } - return "" -} - -type Backtrace struct { - Frames []*Frame `protobuf:"bytes,1,rep,name=frames" json:"frames,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Backtrace) Reset() { *m = Backtrace{} } -func (m *Backtrace) String() string { return proto.CompactTextString(m) } -func (*Backtrace) ProtoMessage() {} - -func (m *Backtrace) GetFrames() []*Frame { - if m != nil { - return m.Frames - } - return nil -} - -type Response struct { - Type *Response_ResponseType `protobuf:"varint,1,opt,name=type,enum=Response_ResponseType" json:"type,omitempty"` - Notes []Response_ResponseNote `protobuf:"varint,6,rep,name=notes,enum=Response_ResponseNote" json:"notes,omitempty"` - Token *int64 `protobuf:"varint,2,opt,name=token" json:"token,omitempty"` - Response []*Datum `protobuf:"bytes,3,rep,name=response" json:"response,omitempty"` - Backtrace *Backtrace `protobuf:"bytes,4,opt,name=backtrace" json:"backtrace,omitempty"` - Profile *Datum `protobuf:"bytes,5,opt,name=profile" json:"profile,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Response) Reset() { *m = Response{} } -func (m *Response) String() string { return proto.CompactTextString(m) } -func (*Response) ProtoMessage() {} - -func (m *Response) GetType() Response_ResponseType { - if m != nil && m.Type != nil { - return *m.Type - } - return 0 -} - -func (m *Response) GetNotes() 
[]Response_ResponseNote { - if m != nil { - return m.Notes - } - return nil -} - -func (m *Response) GetToken() int64 { - if m != nil && m.Token != nil { - return *m.Token - } - return 0 -} - -func (m *Response) GetResponse() []*Datum { - if m != nil { - return m.Response - } - return nil -} - -func (m *Response) GetBacktrace() *Backtrace { - if m != nil { - return m.Backtrace - } - return nil -} - -func (m *Response) GetProfile() *Datum { - if m != nil { - return m.Profile - } - return nil -} - -type Datum struct { - Type *Datum_DatumType `protobuf:"varint,1,opt,name=type,enum=Datum_DatumType" json:"type,omitempty"` - RBool *bool `protobuf:"varint,2,opt,name=r_bool" json:"r_bool,omitempty"` - RNum *float64 `protobuf:"fixed64,3,opt,name=r_num" json:"r_num,omitempty"` - RStr *string `protobuf:"bytes,4,opt,name=r_str" json:"r_str,omitempty"` - RArray []*Datum `protobuf:"bytes,5,rep,name=r_array" json:"r_array,omitempty"` - RObject []*Datum_AssocPair `protobuf:"bytes,6,rep,name=r_object" json:"r_object,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Datum) Reset() { *m = Datum{} } -func (m *Datum) String() string { return proto.CompactTextString(m) } -func (*Datum) ProtoMessage() {} - -var extRange_Datum = []proto.ExtensionRange{ - {10000, 20000}, -} - -func (*Datum) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_Datum -} -func (m *Datum) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} - -func (m *Datum) GetType() Datum_DatumType { - if m != nil && m.Type != nil { - return *m.Type - } - return 0 -} - -func (m *Datum) GetRBool() bool { - if m != nil && m.RBool != nil { - return *m.RBool - } - return false -} - -func (m *Datum) GetRNum() float64 { - if m != nil && m.RNum != nil { - return *m.RNum - } - return 0 -} - -func (m *Datum) GetRStr() string { - if m != nil && m.RStr != nil { - return *m.RStr - } - return "" -} - -func (m *Datum) GetRArray() []*Datum { - if m != nil { - return m.RArray - } - return nil -} - -func (m *Datum) GetRObject() []*Datum_AssocPair { - if m != nil { - return m.RObject - } - return nil -} - -type Datum_AssocPair struct { - Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` - Val *Datum `protobuf:"bytes,2,opt,name=val" json:"val,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Datum_AssocPair) Reset() { *m = Datum_AssocPair{} } -func (m *Datum_AssocPair) String() string { return proto.CompactTextString(m) } -func (*Datum_AssocPair) ProtoMessage() {} - -func (m *Datum_AssocPair) GetKey() string { - if m != nil && m.Key != nil { - return *m.Key - } - return "" -} - -func (m *Datum_AssocPair) GetVal() *Datum { - if m != nil { - return m.Val - } - return nil -} - -type Term struct { - Type *Term_TermType `protobuf:"varint,1,opt,name=type,enum=Term_TermType" json:"type,omitempty"` - Datum *Datum `protobuf:"bytes,2,opt,name=datum" json:"datum,omitempty"` - Args []*Term `protobuf:"bytes,3,rep,name=args" json:"args,omitempty"` - Optargs []*Term_AssocPair `protobuf:"bytes,4,rep,name=optargs" json:"optargs,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Term) Reset() { *m = Term{} } -func (m *Term) String() string { return proto.CompactTextString(m) } -func (*Term) ProtoMessage() {} - -var extRange_Term = []proto.ExtensionRange{ - {10000, 20000}, -} - -func (*Term) 
ExtensionRangeArray() []proto.ExtensionRange { - return extRange_Term -} -func (m *Term) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} - -func (m *Term) GetType() Term_TermType { - if m != nil && m.Type != nil { - return *m.Type - } - return 0 -} - -func (m *Term) GetDatum() *Datum { - if m != nil { - return m.Datum - } - return nil -} - -func (m *Term) GetArgs() []*Term { - if m != nil { - return m.Args - } - return nil -} - -func (m *Term) GetOptargs() []*Term_AssocPair { - if m != nil { - return m.Optargs - } - return nil -} - -type Term_AssocPair struct { - Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` - Val *Term `protobuf:"bytes,2,opt,name=val" json:"val,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Term_AssocPair) Reset() { *m = Term_AssocPair{} } -func (m *Term_AssocPair) String() string { return proto.CompactTextString(m) } -func (*Term_AssocPair) ProtoMessage() {} - -func (m *Term_AssocPair) GetKey() string { - if m != nil && m.Key != nil { - return *m.Key - } - return "" -} - -func (m *Term_AssocPair) GetVal() *Term { - if m != nil { - return m.Val - } - return nil -} - -func init() { - proto.RegisterEnum("VersionDummy_Version", VersionDummy_Version_name, VersionDummy_Version_value) - proto.RegisterEnum("VersionDummy_Protocol", VersionDummy_Protocol_name, VersionDummy_Protocol_value) - proto.RegisterEnum("Query_QueryType", Query_QueryType_name, Query_QueryType_value) - proto.RegisterEnum("Frame_FrameType", Frame_FrameType_name, Frame_FrameType_value) - proto.RegisterEnum("Response_ResponseType", Response_ResponseType_name, Response_ResponseType_value) - proto.RegisterEnum("Response_ResponseNote", Response_ResponseNote_name, Response_ResponseNote_value) - proto.RegisterEnum("Datum_DatumType", Datum_DatumType_name, Datum_DatumType_value) - proto.RegisterEnum("Term_TermType", Term_TermType_name, Term_TermType_value) -} diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/ql2/ql2.proto b/Godeps/_workspace/src/github.com/dancannon/gorethink/ql2/ql2.proto deleted file mode 100644 index 3ad50a2dc..000000000 --- a/Godeps/_workspace/src/github.com/dancannon/gorethink/ql2/ql2.proto +++ /dev/null @@ -1,805 +0,0 @@ -//////////////////////////////////////////////////////////////////////////////// -// THE HIGH-LEVEL VIEW // -//////////////////////////////////////////////////////////////////////////////// - -// Process: When you first open a connection, send the magic number -// for the version of the protobuf you're targeting (in the [Version] -// enum). This should **NOT** be sent as a protobuf; just send the -// little-endian 32-bit integer over the wire raw. This number should -// only be sent once per connection. - -// The magic number shall be followed by an authorization key. The -// first 4 bytes are the length of the key to be sent as a little-endian -// 32-bit integer, followed by the key string. Even if there is no key, -// an empty string should be sent (length 0 and no data). - -// Following the authorization key, the client shall send a magic number -// for the communication protocol they want to use (in the [Protocol] -// enum). This shall be a little-endian 32-bit integer. - -// The server will then respond with a NULL-terminated string response. -// "SUCCESS" indicates that the connection has been accepted. Any other -// response indicates an error, and the response string should describe -// the error. 
- -// Next, for each query you want to send, construct a [Query] protobuf -// and serialize it to a binary blob. Send the blob's size to the -// server encoded as a little-endian 32-bit integer, followed by the -// blob itself. You will receive a [Response] protobuf back preceded -// by its own size, once again encoded as a little-endian 32-bit -// integer. You can see an example exchange below in **EXAMPLE**. - -// A query consists of a [Term] to evaluate and a unique-per-connection -// [token]. - -// Tokens are used for two things: -// * Keeping track of which responses correspond to which queries. -// * Batched queries. Some queries return lots of results, so we send back -// batches of <1000, and you need to send a [CONTINUE] query with the same -// token to get more results from the original query. -//////////////////////////////////////////////////////////////////////////////// - -message VersionDummy { // We need to wrap it like this for some - // non-conforming protobuf libraries - // This enum contains the magic numbers for your version. See **THE HIGH-LEVEL - // VIEW** for what to do with it. - enum Version { - V0_1 = 0x3f61ba36; - V0_2 = 0x723081e1; // Authorization key during handshake - V0_3 = 0x5f75e83e; // Authorization key and protocol during handshake - V0_4 = 0x400c2d20; // Queries execute in parallel - } - - // The protocol to use after the handshake, specified in V0_3 - enum Protocol { - PROTOBUF = 0x271ffc41; - JSON = 0x7e6970c7; - } -} - -// You send one of: -// * A [START] query with a [Term] to evaluate and a unique-per-connection token. -// * A [CONTINUE] query with the same token as a [START] query that returned -// [SUCCESS_PARTIAL] in its [Response]. -// * A [STOP] query with the same token as a [START] query that you want to stop. -// * A [NOREPLY_WAIT] query with a unique per-connection token. The server answers -// with a [WAIT_COMPLETE] [Response]. -message Query { - enum QueryType { - START = 1; // Start a new query. - CONTINUE = 2; // Continue a query that returned [SUCCESS_PARTIAL] - // (see [Response]). - STOP = 3; // Stop a query partway through executing. - NOREPLY_WAIT = 4; - // Wait for noreply operations to finish. - } - optional QueryType type = 1; - // A [Term] is how we represent the operations we want a query to perform. - optional Term query = 2; // only present when [type] = [START] - optional int64 token = 3; - // This flag is ignored on the server. `noreply` should be added - // to `global_optargs` instead (the key "noreply" should map to - // either true or false). - optional bool OBSOLETE_noreply = 4 [default = false]; - - // If this is set to [true], then [Datum] values will sometimes be - // of [DatumType] [R_JSON] (see below). This can provide enormous - // speedups in languages with poor protobuf libraries. - optional bool accepts_r_json = 5 [default = false]; - - message AssocPair { - optional string key = 1; - optional Term val = 2; - } - repeated AssocPair global_optargs = 6; -} - -// A backtrace frame (see `backtrace` in Response below) -message Frame { - enum FrameType { - POS = 1; // Error occurred in a positional argument. - OPT = 2; // Error occurred in an optional argument. - } - optional FrameType type = 1; - optional int64 pos = 2; // The index of the positional argument. - optional string opt = 3; // The name of the optional argument. -} -message Backtrace { - repeated Frame frames = 1; -} -
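Editor's note: a minimal client-side sketch of the handshake described in the header comment above, using the V0_3 and JSON magic numbers from [VersionDummy]. The address and the use of panic are illustrative only; 28015 is assumed here as RethinkDB's conventional driver port.

package main

import (
	"bufio"
	"encoding/binary"
	"fmt"
	"net"
)

func main() {
	conn, err := net.Dial("tcp", "localhost:28015")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// 1. Version magic number, sent raw as a little-endian 32-bit integer.
	binary.Write(conn, binary.LittleEndian, uint32(0x5f75e83e)) // V0_3
	// 2. Auth key: 4-byte little-endian length, then the key (empty here).
	binary.Write(conn, binary.LittleEndian, uint32(0))
	// 3. Wire protocol magic number, also little-endian.
	binary.Write(conn, binary.LittleEndian, uint32(0x7e6970c7)) // JSON

	// 4. The server answers with a NULL-terminated string.
	reply, err := bufio.NewReader(conn).ReadString(0)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", reply) // "SUCCESS\x00" when the connection is accepted
}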
-// You get back a response with the same [token] as your query. -message Response { - enum ResponseType { - // These response types indicate success. - SUCCESS_ATOM = 1; // Query returned a single RQL datatype. - SUCCESS_SEQUENCE = 2; // Query returned a sequence of RQL datatypes. - SUCCESS_PARTIAL = 3; // Query returned a partial sequence of RQL - // datatypes. If you send a [CONTINUE] query with - // the same token as this response, you will get - // more of the sequence. Keep sending [CONTINUE] - // queries until you get back [SUCCESS_SEQUENCE]. - WAIT_COMPLETE = 4; // A [NOREPLY_WAIT] query completed. - - // These response types indicate failure. - CLIENT_ERROR = 16; // Means the client is buggy. An example is if the - // client sends a malformed protobuf, or tries to - // send [CONTINUE] for an unknown token. - COMPILE_ERROR = 17; // Means the query failed during parsing or type - // checking. For example, if you pass too many - // arguments to a function. - RUNTIME_ERROR = 18; // Means the query failed at runtime. An example is - // if you add together two values from a table, but - // they turn out at runtime to be booleans rather - // than numbers. - } - optional ResponseType type = 1; - - // ResponseNotes are used to provide information about the query - // response that may be useful for people writing drivers or ORMs. - // Currently all the notes we send indicate that a stream has certain - // special properties. - enum ResponseNote { - // The stream is a changefeed stream (e.g. `r.table('test').changes()`). - SEQUENCE_FEED = 1; - // The stream is a point changefeed stream - // (e.g. `r.table('test').get(0).changes()`). - ATOM_FEED = 2; - // The stream is an order_by_limit changefeed stream - // (e.g. `r.table('test').order_by(index: 'id').limit(5).changes()`). - ORDER_BY_LIMIT_FEED = 3; - // The stream is a union of multiple changefeed types that can't be - // collapsed to a single type - // (e.g. `r.table('test').changes().union(r.table('test').get(0).changes())`). - UNIONED_FEED = 4; - // The stream is a changefeed stream and includes notes on what state - // the changefeed stream is in (e.g. objects of the form `{state: - // 'initializing'}`). - INCLUDES_STATES = 5; - } - repeated ResponseNote notes = 6; - - optional int64 token = 2; // Indicates what [Query] this response corresponds to. - - // [response] contains 1 RQL datum if [type] is [SUCCESS_ATOM], or many RQL - // data if [type] is [SUCCESS_SEQUENCE] or [SUCCESS_PARTIAL]. It contains 1 - // error message (of type [R_STR]) in all other cases. - repeated Datum response = 3; - - // If [type] is [CLIENT_ERROR], [COMPILE_ERROR], or [RUNTIME_ERROR], then a - // backtrace will be provided. The backtrace says where in the query the - // error occurred. Ideally this information will be presented to the user as - // a pretty-printed version of their query with the erroneous section - // underlined. A backtrace is a series of 0 or more [Frame]s, each of which - // specifies either the index of a positional argument or the name of an - // optional argument. (Those words will make more sense if you look at the - // [Term] message below.) - optional Backtrace backtrace = 4; // Contains n [Frame]s when you get back an error. - - // If the [global_optargs] in the [Query] that this [Response] is a - // response to contains a key "profile" which maps to a static value of - // true then [profile] will contain a [Datum] which provides profiling - // information about the execution of the query.
This field should be - // returned to the user along with the result that would normally be - // returned (a datum or a cursor). In official drivers this is accomplished - // by putting them inside of an object with "value" mapping to the return - // value and "profile" mapping to the profile object. - optional Datum profile = 5; -} - -// A [Datum] is a chunk of data that can be serialized to disk or returned to - // the user in a Response. Currently we only support JSON types, but we may - // support other types in the future (e.g., a date type or an integer type). -message Datum { - enum DatumType { - R_NULL = 1; - R_BOOL = 2; - R_NUM = 3; // a double - R_STR = 4; - R_ARRAY = 5; - R_OBJECT = 6; - // This [DatumType] will only be used if [accepts_r_json] is - // set to [true] in [Query]. [r_str] will be filled with a - // JSON encoding of the [Datum]. - R_JSON = 7; // uses r_str - } - optional DatumType type = 1; - optional bool r_bool = 2; - optional double r_num = 3; - optional string r_str = 4; - - repeated Datum r_array = 5; - message AssocPair { - optional string key = 1; - optional Datum val = 2; - } - repeated AssocPair r_object = 6; - - extensions 10000 to 20000; -} -
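Editor's note: tying the messages above together, under the PROTOBUF wire protocol a driver builds a [Query], marshals it, and length-prefixes the blob exactly as the header comment describes. A sketch using the generated Go bindings from ql2.pb.go (import paths as vendored in this tree; the token value and output formatting are illustrative):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/dancannon/gorethink/ql2"
	"github.com/golang/protobuf/proto"
)

func main() {
	// START query #1: evaluate r.dbList(), about the simplest possible [Term].
	q := &ql2.Query{
		Type:  ql2.Query_START.Enum(),
		Token: proto.Int64(1),
		Query: &ql2.Term{Type: ql2.Term_DB_LIST.Enum()},
	}

	blob, err := proto.Marshal(q)
	if err != nil {
		panic(err)
	}

	// Frame it: little-endian 32-bit size, then the serialized blob.
	var frame bytes.Buffer
	binary.Write(&frame, binary.LittleEndian, uint32(len(blob)))
	frame.Write(blob)
	fmt.Printf("% x\n", frame.Bytes())
}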
-// A [Term] is either a piece of data (see **Datum** above), or an operator and -// its operands. If you have a [Datum], it's stored in the member [datum]. If -// you have an operator, its positional arguments are stored in [args] and its -// optional arguments are stored in [optargs]. -// -// A note about type signatures: -// We use the following notation to denote types: -// arg1_type, arg2_type, argrest_type... -> result_type -// So, for example, if we have a function `avg` that takes any number of -// arguments and averages them, we might write: -// NUMBER... -> NUMBER -// Or if we had a function that took one number modulo another: -// NUMBER, NUMBER -> NUMBER -// Or a function that takes a table and a primary key of any Datum type, then -// retrieves the entry with that primary key: -// Table, DATUM -> OBJECT -// Some arguments must be provided as literal values (and not the results of sub -// terms). These are marked with a `!`. -// Optional arguments are specified within curly braces as argname `:` value -// type (e.g. `{use_outdated:BOOL}`) -// Many RQL operations are polymorphic. For these, alternative type signatures -// are separated by `|`. -// -// The RQL type hierarchy is as follows: -// Top -// DATUM -// NULL -// BOOL -// NUMBER -// STRING -// OBJECT -// SingleSelection -// ARRAY -// Sequence -// ARRAY -// Stream -// StreamSelection -// Table -// Database -// Function -// Ordering - used only by ORDER_BY -// Pathspec -- an object, string, or array that specifies a path -// Error -message Term { - enum TermType { - // A RQL datum, stored in `datum` below. - DATUM = 1; - - MAKE_ARRAY = 2; // DATUM... -> ARRAY - // Evaluate the terms in [optargs] and make an object - MAKE_OBJ = 3; // {...} -> OBJECT - - // * Compound types - - // Takes an integer representing a variable and returns the value stored - // in that variable. It's the responsibility of the client to translate - // from their local representation of a variable to a unique _non-negative_ - // integer for that variable. (We do it this way instead of letting - // clients provide variable names as strings to discourage - // variable-capturing client libraries, and because it's more efficient - // on the wire.) - VAR = 10; // !NUMBER -> DATUM - // Takes some javascript code and executes it. - JAVASCRIPT = 11; // STRING {timeout: !NUMBER} -> DATUM | - // STRING {timeout: !NUMBER} -> Function(*) - UUID = 169; // () -> DATUM - - // Takes an HTTP URL and gets it. If the get succeeds and - // returns valid JSON, it is converted into a DATUM - HTTP = 153; // STRING {data: OBJECT | STRING, - // timeout: !NUMBER, - // method: STRING, - // params: OBJECT, - // header: OBJECT | ARRAY, - // attempts: NUMBER, - // redirects: NUMBER, - // verify: BOOL, - // page: FUNC | STRING, - // page_limit: NUMBER, - // auth: OBJECT, - // result_format: STRING, - // } -> STRING | STREAM - - // Takes a string and throws an error with that message. - // Inside of a `default` block, you can omit the first - // argument to rethrow whatever error you catch (this is most - // useful as an argument to the `default` filter optarg). - ERROR = 12; // STRING -> Error | -> Error - // Takes nothing and returns a reference to the implicit variable. - IMPLICIT_VAR = 13; // -> DATUM - - // * Data Operators - // Returns a reference to a database. - DB = 14; // STRING -> Database - // Returns a reference to a table. - TABLE = 15; // Database, STRING, {use_outdated:BOOL, identifier_format:STRING} -> Table - // STRING, {use_outdated:BOOL, identifier_format:STRING} -> Table - // Gets a single element from a table by its primary or a secondary key. - GET = 16; // Table, STRING -> SingleSelection | Table, NUMBER -> SingleSelection | - // Table, STRING -> NULL | Table, NUMBER -> NULL | - GET_ALL = 78; // Table, DATUM..., {index:!STRING} => ARRAY - - // Simple DATUM Ops - EQ = 17; // DATUM... -> BOOL - NE = 18; // DATUM... -> BOOL - LT = 19; // DATUM... -> BOOL - LE = 20; // DATUM... -> BOOL - GT = 21; // DATUM... -> BOOL - GE = 22; // DATUM... -> BOOL - NOT = 23; // BOOL -> BOOL - // ADD can either add two numbers or concatenate two arrays. - ADD = 24; // NUMBER... -> NUMBER | STRING... -> STRING - SUB = 25; // NUMBER... -> NUMBER - MUL = 26; // NUMBER... -> NUMBER - DIV = 27; // NUMBER... -> NUMBER - MOD = 28; // NUMBER, NUMBER -> NUMBER - - // DATUM Array Ops - // Append a single element to the end of an array (like `snoc`). - APPEND = 29; // ARRAY, DATUM -> ARRAY - // Prepend a single element to the beginning of an array (like `cons`). - PREPEND = 80; // ARRAY, DATUM -> ARRAY - // Remove the elements of one array from another array. - DIFFERENCE = 95; // ARRAY, ARRAY -> ARRAY - - // DATUM Set Ops - // Set ops work on arrays. They don't use actual sets and thus have - // performance characteristics you would expect from arrays rather than - // from sets. All set operations have the post condition that the - // array they return contains no duplicate values. - SET_INSERT = 88; // ARRAY, DATUM -> ARRAY - SET_INTERSECTION = 89; // ARRAY, ARRAY -> ARRAY - SET_UNION = 90; // ARRAY, ARRAY -> ARRAY - SET_DIFFERENCE = 91; // ARRAY, ARRAY -> ARRAY - - SLICE = 30; // Sequence, NUMBER, NUMBER -> Sequence - SKIP = 70; // Sequence, NUMBER -> Sequence - LIMIT = 71; // Sequence, NUMBER -> Sequence - OFFSETS_OF = 87; // Sequence, DATUM -> Sequence | Sequence, Function(1) -> Sequence - CONTAINS = 93; // Sequence, DATUM -> BOOL | Sequence, Function(1) -> BOOL - - // Stream/Object Ops - // Get a particular field from an object, or map that over a - // sequence. - GET_FIELD = 31; // OBJECT, STRING -> DATUM - // | Sequence, STRING -> Sequence - // Return an array containing the keys of the object. - KEYS = 94; // OBJECT -> ARRAY - // Creates an object - OBJECT = 143; // STRING, DATUM, ...
-> OBJECT - // Check whether an object contains all the specified fields, - // or filters a sequence so that all objects inside of it - // contain all the specified fields. - HAS_FIELDS = 32; // OBJECT, Pathspec... -> BOOL - // x.with_fields(...) <=> x.has_fields(...).pluck(...) - WITH_FIELDS = 96; // Sequence, Pathspec... -> Sequence - // Get a subset of an object by selecting some attributes to preserve, - // or map that over a sequence. (Both pick and pluck, polymorphic.) - PLUCK = 33; // Sequence, Pathspec... -> Sequence | OBJECT, Pathspec... -> OBJECT - // Get a subset of an object by selecting some attributes to discard, or - // map that over a sequence. (Both unpick and without, polymorphic.) - WITHOUT = 34; // Sequence, Pathspec... -> Sequence | OBJECT, Pathspec... -> OBJECT - // Merge objects (right-preferential) - MERGE = 35; // OBJECT... -> OBJECT | Sequence -> Sequence - - // Sequence Ops - // Get all elements of a sequence between two values. - // Half-open by default, but the openness of either side can be - // changed by passing 'closed' or 'open' for `right_bound` or - // `left_bound`. - BETWEEN_DEPRECATED = 36; // Deprecated version of between, which allows `null` to specify unboundedness - // With the newer version, clients should use `r.minval` and `r.maxval` for unboundedness - BETWEEN = 182; // StreamSelection, DATUM, DATUM, {index:!STRING, right_bound:STRING, left_bound:STRING} -> StreamSelection - REDUCE = 37; // Sequence, Function(2) -> DATUM - MAP = 38; // Sequence, Function(1) -> Sequence - // The arity of the function should be - // Sequence..., Function(sizeof...(Sequence)) -> Sequence - - // Filter a sequence with either a function or a shortcut - // object (see API docs for details). The body of FILTER is - // wrapped in an implicit `.default(false)`, and you can - // change the default value by specifying the `default` - // optarg. If you make the default `r.error`, all errors - // caught by `default` will be rethrown as if the `default` - // did not exist. - FILTER = 39; // Sequence, Function(1), {default:DATUM} -> Sequence | - // Sequence, OBJECT, {default:DATUM} -> Sequence - // Map a function over a sequence and then concatenate the results together. - CONCAT_MAP = 40; // Sequence, Function(1) -> Sequence - // Order a sequence based on one or more attributes. - ORDER_BY = 41; // Sequence, (!STRING | Ordering)... -> Sequence - // Get all distinct elements of a sequence (like `uniq`). - DISTINCT = 42; // Sequence -> Sequence - // Count the number of elements in a sequence, or only the elements that match - // a given filter. - COUNT = 43; // Sequence -> NUMBER | Sequence, DATUM -> NUMBER | Sequence, Function(1) -> NUMBER - IS_EMPTY = 86; // Sequence -> BOOL - // Take the union of multiple sequences (preserves duplicate elements; use `distinct` to remove them). - UNION = 44; // Sequence... -> Sequence - // Get the Nth element of a sequence. - NTH = 45; // Sequence, NUMBER -> DATUM - // do NTH or GET_FIELD depending on target object - BRACKET = 170; // Sequence | OBJECT, NUMBER | STRING -> DATUM - // OBSOLETE_GROUPED_MAPREDUCE = 46; - // OBSOLETE_GROUPBY = 47; - - INNER_JOIN = 48; // Sequence, Sequence, Function(2) -> Sequence - OUTER_JOIN = 49; // Sequence, Sequence, Function(2) -> Sequence - // An inner-join that does an equality comparison on two attributes.
- EQ_JOIN = 50; // Sequence, !STRING, Sequence, {index:!STRING} -> Sequence - ZIP = 72; // Sequence -> Sequence - RANGE = 173; // -> Sequence [0, +inf) - // NUMBER -> Sequence [0, a) - // NUMBER, NUMBER -> Sequence [a, b) - - // Array Ops - // Insert an element into an array at a given index. - INSERT_AT = 82; // ARRAY, NUMBER, DATUM -> ARRAY - // Remove an element at a given index from an array. - DELETE_AT = 83; // ARRAY, NUMBER -> ARRAY | - // ARRAY, NUMBER, NUMBER -> ARRAY - // Change the element at a given index of an array. - CHANGE_AT = 84; // ARRAY, NUMBER, DATUM -> ARRAY - // Splice one array into another array. - SPLICE_AT = 85; // ARRAY, NUMBER, ARRAY -> ARRAY - - // * Type Ops - // Coerces a datum to a named type (e.g. "bool"). - // If you previously used `stream_to_array`, you should use this instead - // with the type "array". - COERCE_TO = 51; // Top, STRING -> Top - // Returns the named type of a datum (e.g. TYPE_OF(true) = "BOOL") - TYPE_OF = 52; // Top -> STRING - - // * Write Ops (the OBJECTs contain data about number of errors etc.) - // Updates all the rows in a selection. Calls its Function with the row - // to be updated, and then merges the result of that call. - UPDATE = 53; // StreamSelection, Function(1), {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT | - // SingleSelection, Function(1), {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT | - // StreamSelection, OBJECT, {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT | - // SingleSelection, OBJECT, {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT - // Deletes all the rows in a selection. - DELETE = 54; // StreamSelection, {durability:STRING, return_changes:BOOL} -> OBJECT | SingleSelection -> OBJECT - // Replaces all the rows in a selection. Calls its Function with the row - // to be replaced, and then discards it and stores the result of that - // call. - REPLACE = 55; // StreamSelection, Function(1), {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT | SingleSelection, Function(1), {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT - // Inserts into a table. If `conflict` is replace, overwrites - // entries with the same primary key. If `conflict` is - // update, does an update on the entry. If `conflict` is - // error, or is omitted, conflicts will trigger an error. - INSERT = 56; // Table, OBJECT, {conflict:STRING, durability:STRING, return_changes:BOOL} -> OBJECT | Table, Sequence, {conflict:STRING, durability:STRING, return_changes:BOOL} -> OBJECT - - // * Administrative OPs - // Creates a database with a particular name. - DB_CREATE = 57; // STRING -> OBJECT - // Drops a database with a particular name. - DB_DROP = 58; // STRING -> OBJECT - // Lists all the databases by name. (Takes no arguments) - DB_LIST = 59; // -> ARRAY - // Creates a table with a particular name in a particular - // database. (You may omit the first argument to use the - // default database.) - TABLE_CREATE = 60; // Database, STRING, {primary_key:STRING, shards:NUMBER, replicas:NUMBER, primary_replica_tag:STRING} -> OBJECT - // Database, STRING, {primary_key:STRING, shards:NUMBER, replicas:OBJECT, primary_replica_tag:STRING} -> OBJECT - // STRING, {primary_key:STRING, shards:NUMBER, replicas:NUMBER, primary_replica_tag:STRING} -> OBJECT - // STRING, {primary_key:STRING, shards:NUMBER, replicas:OBJECT, primary_replica_tag:STRING} -> OBJECT - // Drops a table with a particular name from a particular - // database.
(You may omit the first argument to use the - // default database.) - TABLE_DROP = 61; // Database, STRING -> OBJECT - // STRING -> OBJECT - // Lists all the tables in a particular database. (You may - // omit the first argument to use the default database.) - TABLE_LIST = 62; // Database -> ARRAY - // -> ARRAY - // Returns the row in the `rethinkdb.table_config` or `rethinkdb.db_config` table - // that corresponds to the given database or table. - CONFIG = 174; // Database -> SingleSelection - // Table -> SingleSelection - // Returns the row in the `rethinkdb.table_status` table that corresponds to the - // given table. - STATUS = 175; // Table -> SingleSelection - // Called on a table, waits for that table to be ready for read/write operations. - // Called on a database, waits for all of the tables in the database to be ready. - // Returns the corresponding row or rows from the `rethinkdb.table_status` table. - WAIT = 177; // Table -> OBJECT - // Database -> OBJECT - // Generates a new config for the given table, or all tables in the given database - // The `shards` and `replicas` arguments are required - RECONFIGURE = 176; // Database, {shards:NUMBER, replicas:NUMBER[, primary_replica_tag:STRING, dry_run:BOOLEAN]} -> OBJECT - // Database, {shards:NUMBER, replicas:OBJECT[, primary_replica_tag:STRING, dry_run:BOOLEAN]} -> OBJECT - // Table, {shards:NUMBER, replicas:NUMBER[, primary_replica_tag:STRING, dry_run:BOOLEAN]} -> OBJECT - // Table, {shards:NUMBER, replicas:OBJECT[, primary_replica_tag:STRING, dry_run:BOOLEAN]} -> OBJECT - // Balances the table's shards but leaves everything else the same. Can also be - // applied to an entire database at once. - REBALANCE = 179; // Table -> OBJECT - // Database -> OBJECT - - // Ensures that previously issued soft-durability writes are complete and - // written to disk. - SYNC = 138; // Table -> OBJECT - - // * Secondary indexes OPs - // Creates a new secondary index with a particular name and definition. - INDEX_CREATE = 75; // Table, STRING, Function(1), {multi:BOOL} -> OBJECT - // Drops a secondary index with a particular name from the specified table. - INDEX_DROP = 76; // Table, STRING -> OBJECT - // Lists all secondary indexes on a particular table. - INDEX_LIST = 77; // Table -> ARRAY - // Gets information about whether or not a set of indexes are ready to - // be accessed. Returns a list of objects that look like this: - // {index:STRING, ready:BOOL[, blocks_processed:NUMBER, blocks_total:NUMBER]} - INDEX_STATUS = 139; // Table, STRING... -> ARRAY - // Blocks until a set of indexes are ready to be accessed. Returns the - // same values INDEX_STATUS. - INDEX_WAIT = 140; // Table, STRING... -> ARRAY - // Renames the given index to a new name - INDEX_RENAME = 156; // Table, STRING, STRING, {overwrite:BOOL} -> OBJECT - - // * Control Operators - // Calls a function on data - FUNCALL = 64; // Function(*), DATUM... -> DATUM - // Executes its first argument, and returns its second argument if it - // got [true] or its third argument if it got [false] (like an `if` - // statement). - BRANCH = 65; // BOOL, Top, Top -> Top - // Returns true if any of its arguments returns true (short-circuits). - OR = 66; // BOOL... -> BOOL - // Returns true if all of its arguments return true (short-circuits). - AND = 67; // BOOL... -> BOOL - // Calls its Function with each entry in the sequence - // and executes the array of terms that Function returns. 
- FOR_EACH = 68; // Sequence, Function(1) -> OBJECT - -//////////////////////////////////////////////////////////////////////////////// -////////// Special Terms -//////////////////////////////////////////////////////////////////////////////// - - // An anonymous function. Takes an array of numbers representing - // variables (see [VAR] above), and a [Term] to execute with those in - // scope. Returns a function that may be passed an array of arguments, - // then executes the Term with those bound to the variable names. The - // user will never construct this directly. We use it internally for - // things like `map` which take a function. The "arity" of a [Function] is - // the number of arguments it takes. - // For example, here's what `_X_.map{|x| x+2}` turns into: - // Term { - // type = MAP; - // args = [_X_, - // Term { - // type = Function; - // args = [Term { - // type = DATUM; - // datum = Datum { - // type = R_ARRAY; - // r_array = [Datum { type = R_NUM; r_num = 1; }]; - // }; - // }, - // Term { - // type = ADD; - // args = [Term { - // type = VAR; - // args = [Term { - // type = DATUM; - // datum = Datum { type = R_NUM; - // r_num = 1}; - // }]; - // }, - // Term { - // type = DATUM; - // datum = Datum { type = R_NUM; r_num = 2; }; - // }]; - // }]; - // }]; - FUNC = 69; // ARRAY, Top -> ARRAY -> Top - - // Indicates to ORDER_BY that this attribute is to be sorted in ascending order. - ASC = 73; // !STRING -> Ordering - // Indicates to ORDER_BY that this attribute is to be sorted in descending order. - DESC = 74; // !STRING -> Ordering - - // Gets info about anything. INFO is most commonly called on tables. - INFO = 79; // Top -> OBJECT - - // `a.match(b)` returns a match object if the string `a` - // matches the regular expression `b`. - MATCH = 97; // STRING, STRING -> DATUM - - // Change the case of a string. - UPCASE = 141; // STRING -> STRING - DOWNCASE = 142; // STRING -> STRING - - // Select a number of elements from sequence with uniform distribution. - SAMPLE = 81; // Sequence, NUMBER -> Sequence - - // Evaluates its first argument. If that argument returns - // NULL or throws an error related to the absence of an - // expected value (for instance, accessing a non-existent - // field or adding NULL to an integer), DEFAULT will either - // return its second argument or execute it if it's a - // function. If the second argument is a function, it will be - // passed either the text of the error or NULL as its - // argument. - DEFAULT = 92; // Top, Top -> Top - - // Parses its first argument as a json string and returns it as a - // datum. - JSON = 98; // STRING -> DATUM - // Returns the datum as a JSON string. - // N.B.: we would really prefer this be named TO_JSON and that exists as - // an alias in Python and JavaScript drivers; however it conflicts with the - // standard `to_json` method defined by Ruby's standard json library. - TO_JSON_STRING = 172; // DATUM -> STRING - - // Parses its first arguments as an ISO 8601 time and returns it as a - // datum. - ISO8601 = 99; // STRING -> PSEUDOTYPE(TIME) - // Prints a time as an ISO 8601 time. - TO_ISO8601 = 100; // PSEUDOTYPE(TIME) -> STRING - - // Returns a time given seconds since epoch in UTC. - EPOCH_TIME = 101; // NUMBER -> PSEUDOTYPE(TIME) - // Returns seconds since epoch in UTC given a time. - TO_EPOCH_TIME = 102; // PSEUDOTYPE(TIME) -> NUMBER - - // The time the query was received by the server. - NOW = 103; // -> PSEUDOTYPE(TIME) - // Puts a time into an ISO 8601 timezone. 
- IN_TIMEZONE = 104; // PSEUDOTYPE(TIME), STRING -> PSEUDOTYPE(TIME) - // a.during(b, c) returns whether a is in the range [b, c) - DURING = 105; // PSEUDOTYPE(TIME), PSEUDOTYPE(TIME), PSEUDOTYPE(TIME) -> BOOL - // Retrieves the date portion of a time. - DATE = 106; // PSEUDOTYPE(TIME) -> PSEUDOTYPE(TIME) - // x.time_of_day == x.date - x - TIME_OF_DAY = 126; // PSEUDOTYPE(TIME) -> NUMBER - // Returns the timezone of a time. - TIMEZONE = 127; // PSEUDOTYPE(TIME) -> STRING - - // These access the various components of a time. - YEAR = 128; // PSEUDOTYPE(TIME) -> NUMBER - MONTH = 129; // PSEUDOTYPE(TIME) -> NUMBER - DAY = 130; // PSEUDOTYPE(TIME) -> NUMBER - DAY_OF_WEEK = 131; // PSEUDOTYPE(TIME) -> NUMBER - DAY_OF_YEAR = 132; // PSEUDOTYPE(TIME) -> NUMBER - HOURS = 133; // PSEUDOTYPE(TIME) -> NUMBER - MINUTES = 134; // PSEUDOTYPE(TIME) -> NUMBER - SECONDS = 135; // PSEUDOTYPE(TIME) -> NUMBER - - // Construct a time from a date and optional timezone or a - // date+time and optional timezone. - TIME = 136; // NUMBER, NUMBER, NUMBER, STRING -> PSEUDOTYPE(TIME) | - // NUMBER, NUMBER, NUMBER, NUMBER, NUMBER, NUMBER, STRING -> PSEUDOTYPE(TIME) | - - // Constants for ISO 8601 days of the week. - MONDAY = 107; // -> 1 - TUESDAY = 108; // -> 2 - WEDNESDAY = 109; // -> 3 - THURSDAY = 110; // -> 4 - FRIDAY = 111; // -> 5 - SATURDAY = 112; // -> 6 - SUNDAY = 113; // -> 7 - - // Constants for ISO 8601 months. - JANUARY = 114; // -> 1 - FEBRUARY = 115; // -> 2 - MARCH = 116; // -> 3 - APRIL = 117; // -> 4 - MAY = 118; // -> 5 - JUNE = 119; // -> 6 - JULY = 120; // -> 7 - AUGUST = 121; // -> 8 - SEPTEMBER = 122; // -> 9 - OCTOBER = 123; // -> 10 - NOVEMBER = 124; // -> 11 - DECEMBER = 125; // -> 12 - - // Indicates to MERGE to replace the other object rather than merge it. - LITERAL = 137; // JSON -> Merging - - // SEQUENCE, STRING -> GROUPED_SEQUENCE | SEQUENCE, FUNCTION -> GROUPED_SEQUENCE - GROUP = 144; - SUM = 145; - AVG = 146; - MIN = 147; - MAX = 148; - - // `str.split()` splits on whitespace - // `str.split(" ")` splits on spaces only - // `str.split(" ", 5)` splits on spaces with at most 5 results - // `str.split(nil, 5)` splits on whitespace with at most 5 results - SPLIT = 149; // STRING -> ARRAY | STRING, STRING -> ARRAY | STRING, STRING, NUMBER -> ARRAY | STRING, NULL, NUMBER -> ARRAY - - UNGROUP = 150; // GROUPED_DATA -> ARRAY - - // Takes a range of numbers and returns a random number within the range - RANDOM = 151; // NUMBER, NUMBER {float:BOOL} -> DATUM - - CHANGES = 152; // TABLE -> STREAM - ARGS = 154; // ARRAY -> SPECIAL (used to splice arguments) - - // BINARY is client-only at the moment, it is not supported on the server - BINARY = 155; // STRING -> PSEUDOTYPE(BINARY) - - GEOJSON = 157; // OBJECT -> PSEUDOTYPE(GEOMETRY) - TO_GEOJSON = 158; // PSEUDOTYPE(GEOMETRY) -> OBJECT - POINT = 159; // NUMBER, NUMBER -> PSEUDOTYPE(GEOMETRY) - LINE = 160; // (ARRAY | PSEUDOTYPE(GEOMETRY))... -> PSEUDOTYPE(GEOMETRY) - POLYGON = 161; // (ARRAY | PSEUDOTYPE(GEOMETRY))... 
-> PSEUDOTYPE(GEOMETRY) - DISTANCE = 162; // PSEUDOTYPE(GEOMETRY), PSEUDOTYPE(GEOMETRY) {geo_system:STRING, unit:STRING} -> NUMBER - INTERSECTS = 163; // PSEUDOTYPE(GEOMETRY), PSEUDOTYPE(GEOMETRY) -> BOOL - INCLUDES = 164; // PSEUDOTYPE(GEOMETRY), PSEUDOTYPE(GEOMETRY) -> BOOL - CIRCLE = 165; // PSEUDOTYPE(GEOMETRY), NUMBER {num_vertices:NUMBER, geo_system:STRING, unit:STRING, fill:BOOL} -> PSEUDOTYPE(GEOMETRY) - GET_INTERSECTING = 166; // TABLE, PSEUDOTYPE(GEOMETRY) {index:!STRING} -> StreamSelection - FILL = 167; // PSEUDOTYPE(GEOMETRY) -> PSEUDOTYPE(GEOMETRY) - GET_NEAREST = 168; // TABLE, PSEUDOTYPE(GEOMETRY) {index:!STRING, max_results:NUM, max_dist:NUM, geo_system:STRING, unit:STRING} -> ARRAY - POLYGON_SUB = 171; // PSEUDOTYPE(GEOMETRY), PSEUDOTYPE(GEOMETRY) -> PSEUDOTYPE(GEOMETRY) - - // Constants for specifying key ranges - MINVAL = 180; - MAXVAL = 181; - } - optional TermType type = 1; - - // This is only used when type is DATUM. - optional Datum datum = 2; - - repeated Term args = 3; // Holds the positional arguments of the query. - message AssocPair { - optional string key = 1; - optional Term val = 2; - } - repeated AssocPair optargs = 4; // Holds the optional arguments of the query. - // (Note that the order of the optional arguments doesn't matter; think of a - // Hash.) - - extensions 10000 to 20000; -} - -//////////////////////////////////////////////////////////////////////////////// -// EXAMPLE // -//////////////////////////////////////////////////////////////////////////////// -// ```ruby -// r.table('tbl', {:use_outdated => true}).insert([{:id => 0}, {:id => 1}]) -// ``` -// Would turn into: -// Term { -// type = INSERT; -// args = [Term { -// type = TABLE; -// args = [Term { -// type = DATUM; -// datum = Datum { type = R_STR; r_str = "tbl"; }; -// }]; -// optargs = [["use_outdated", -// Term { -// type = DATUM; -// datum = Datum { type = R_BOOL; r_bool = true; }; -// }]]; -// }, -// Term { -// type = MAKE_ARRAY; -// args = [Term { -// type = DATUM; -// datum = Datum { type = R_OBJECT; r_object = [["id", 0]]; }; -// }, -// Term { -// type = DATUM; -// datum = Datum { type = R_OBJECT; r_object = [["id", 1]]; }; -// }]; -// }] -// } -// And the server would reply: -// Response { -// type = SUCCESS_ATOM; -// token = 1; -// response = [Datum { type = R_OBJECT; r_object = [["inserted", 2]]; }]; -// } -// Or, if there were an error: -// Response { -// type = RUNTIME_ERROR; -// token = 1; -// response = [Datum { type = R_STR; r_str = "The table `tbl` doesn't exist!"; }]; -// backtrace = [Frame { type = POS; pos = 0; }, Frame { type = POS; pos = 0; }]; -// } diff --git a/Godeps/_workspace/src/github.com/dancannon/gorethink/types/geometry.go b/Godeps/_workspace/src/github.com/dancannon/gorethink/types/geometry.go deleted file mode 100644 index 00ff80f0d..000000000 --- a/Godeps/_workspace/src/github.com/dancannon/gorethink/types/geometry.go +++ /dev/null @@ -1,225 +0,0 @@ -package types - -import ( - "fmt" -) - -type Geometry struct { - Type string - Point Point - Line Line - Lines Lines -} - -func (g Geometry) MarshalRQL() (interface{}, error) { - switch g.Type { - case "Point": - return g.Point.MarshalRQL() - case "LineString": - return g.Line.MarshalRQL() - case "Polygon": - return g.Lines.MarshalRQL() - default: - return nil, fmt.Errorf("pseudo-type GEOMETRY object field 'type' %s is not valid", g.Type) - } -} - -func (g *Geometry) UnmarshalRQL(data interface{}) error { - if data, ok := data.(Geometry); ok { - g.Type = data.Type - g.Point = data.Point - g.Line = data.Line - 
g.Lines = data.Lines - - return nil - } - - m, ok := data.(map[string]interface{}) - if !ok { - return fmt.Errorf("pseudo-type GEOMETRY object is not valid") - } - - typ, ok := m["type"] - if !ok { - return fmt.Errorf("pseudo-type GEOMETRY object is not valid, expects 'type' field") - } - coords, ok := m["coordinates"] - if !ok { - return fmt.Errorf("pseudo-type GEOMETRY object is not valid, expects 'coordinates' field") - } - - var err error - switch typ { - case "Point": - g.Type = "Point" - g.Point, err = UnmarshalPoint(coords) - case "LineString": - g.Type = "LineString" - g.Line, err = UnmarshalLineString(coords) - case "Polygon": - g.Type = "Polygon" - g.Lines, err = UnmarshalPolygon(coords) - default: - return fmt.Errorf("pseudo-type GEOMETRY object has invalid type") - } - - if err != nil { - return err - } - - return nil -} - -type Point struct { - Lon float64 - Lat float64 -} -type Line []Point -type Lines []Line - -func (p Point) Coords() interface{} { - return []interface{}{p.Lon, p.Lat} -} - -func (p Point) MarshalRQL() (interface{}, error) { - return map[string]interface{}{ - "$reql_type$": "GEOMETRY", - "coordinates": p.Coords(), - "type": "Point", - }, nil -} - -func (p *Point) UnmarshalRQL(data interface{}) error { - g := &Geometry{} - err := g.UnmarshalRQL(data) - if err != nil { - return err - } - if g.Type != "Point" { - return fmt.Errorf("pseudo-type GEOMETRY object has type %s, expected type %s", g.Type, "Point") - } - - p.Lat = g.Point.Lat - p.Lon = g.Point.Lon - - return nil -} - -func (l Line) Coords() interface{} { - coords := make([]interface{}, len(l)) - for i, point := range l { - coords[i] = point.Coords() - } - return coords -} - -func (l Line) MarshalRQL() (interface{}, error) { - return map[string]interface{}{ - "$reql_type$": "GEOMETRY", - "coordinates": l.Coords(), - "type": "LineString", - }, nil -} - -func (l *Line) UnmarshalRQL(data interface{}) error { - g := &Geometry{} - err := g.UnmarshalRQL(data) - if err != nil { - return err - } - if g.Type != "LineString" { - return fmt.Errorf("pseudo-type GEOMETRY object has type %s, expected type %s", g.Type, "LineString") - } - - *l = g.Line - - return nil -} - -func (l Lines) Coords() interface{} { - coords := make([]interface{}, len(l)) - for i, line := range l { - coords[i] = line.Coords() - } - return coords -} - -func (l Lines) MarshalRQL() (interface{}, error) { - return map[string]interface{}{ - "$reql_type$": "GEOMETRY", - "coordinates": l.Coords(), - "type": "Polygon", - }, nil -} - -func (l *Lines) UnmarshalRQL(data interface{}) error { - g := &Geometry{} - err := g.UnmarshalRQL(data) - if err != nil { - return err - } - if g.Type != "Polygon" { - return fmt.Errorf("pseudo-type GEOMETRY object has type %s, expected type %s", g.Type, "Polygon") - } - - *l = g.Lines - - return nil -} - -func UnmarshalPoint(v interface{}) (Point, error) { - coords, ok := v.([]interface{}) - if !ok { - return Point{}, fmt.Errorf("pseudo-type GEOMETRY object field 'coordinates' is not valid") - } - if len(coords) != 2 { - return Point{}, fmt.Errorf("pseudo-type GEOMETRY object field 'coordinates' is not valid") - } - lon, ok := coords[0].(float64) - if !ok { - return Point{}, fmt.Errorf("pseudo-type GEOMETRY object field 'coordinates' is not valid") - } - lat, ok := coords[1].(float64) - if !ok { - return Point{}, fmt.Errorf("pseudo-type GEOMETRY object field 'coordinates' is not valid") - } - - return Point{ - Lon: lon, - Lat: lat, - }, nil -} - -func UnmarshalLineString(v interface{}) (Line, error) { - points, ok := 
v.([]interface{}) - if !ok { - return Line{}, fmt.Errorf("pseudo-type GEOMETRY object field 'coordinates' is not valid") - } - - var err error - line := make(Line, len(points)) - for i, coords := range points { - line[i], err = UnmarshalPoint(coords) - if err != nil { - return Line{}, err - } - } - return line, nil -} - -func UnmarshalPolygon(v interface{}) (Lines, error) { - lines, ok := v.([]interface{}) - if !ok { - return Lines{}, fmt.Errorf("pseudo-type GEOMETRY object field 'coordinates' is not valid") - } - - var err error - polygon := make(Lines, len(lines)) - for i, line := range lines { - polygon[i], err = UnmarshalLineString(line) - if err != nil { - return Lines{}, err - } - } - return polygon, nil -} diff --git a/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/README.md b/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/README.md deleted file mode 100644 index 2d1b3d932..000000000 --- a/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/README.md +++ /dev/null @@ -1,34 +0,0 @@ -circuit-breaker -=============== - -[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency) -[![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/breaker?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/breaker) -[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) - -The circuit-breaker resiliency pattern for golang. - -Creating a breaker takes three parameters: -- error threshold (for opening the breaker) -- success threshold (for closing the breaker) -- timeout (how long to keep the breaker open) - -```go -b := breaker.New(3, 1, 5*time.Second) - -for { - result := b.Run(func() error { - // communicate with some external service and - // return an error if the communication failed - return nil - }) - - switch result { - case nil: - // success! - case breaker.ErrBreakerOpen: - // our function wasn't run because the breaker was open - default: - // some other error - } -} -``` diff --git a/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/breaker.go b/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/breaker.go deleted file mode 100644 index f88ca7248..000000000 --- a/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/breaker.go +++ /dev/null @@ -1,161 +0,0 @@ -// Package breaker implements the circuit-breaker resiliency pattern for Go. -package breaker - -import ( - "errors" - "sync" - "sync/atomic" - "time" -) - -// ErrBreakerOpen is the error returned from Run() when the function is not executed -// because the breaker is currently open. -var ErrBreakerOpen = errors.New("circuit breaker is open") - -const ( - closed uint32 = iota - open - halfOpen -) - -// Breaker implements the circuit-breaker resiliency pattern -type Breaker struct { - errorThreshold, successThreshold int - timeout time.Duration - - lock sync.Mutex - state uint32 - errors, successes int - lastError time.Time -} - -// New constructs a new circuit-breaker that starts closed. -// From closed, the breaker opens if "errorThreshold" errors are seen -// without an error-free period of at least "timeout". From open, the -// breaker half-closes after "timeout". From half-open, the breaker closes -// after "successThreshold" consecutive successes, or opens on a single error. 
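The closed → open → half-open cycle described in the comment above is easiest to see with the thresholds turned all the way down. A minimal sketch against the vendored package (the threshold and timeout values here are illustrative only, not defaults):

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/eapache/go-resiliency/breaker"
)

func main() {
	// errorThreshold=2, successThreshold=1, timeout=100ms.
	b := breaker.New(2, 1, 100*time.Millisecond)
	fail := func() error { return errors.New("boom") }
	ok := func() error { return nil }

	_ = b.Run(fail) // first error: breaker stays closed
	_ = b.Run(fail) // second error hits errorThreshold: breaker opens

	// While open, work is short-circuited without being run.
	fmt.Println(b.Run(ok) == breaker.ErrBreakerOpen) // true

	time.Sleep(150 * time.Millisecond) // after `timeout`, the breaker half-opens
	fmt.Println(b.Run(ok))             // nil: one success closes it (successThreshold=1)
}
```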
-func New(errorThreshold, successThreshold int, timeout time.Duration) *Breaker { - return &Breaker{ - errorThreshold: errorThreshold, - successThreshold: successThreshold, - timeout: timeout, - } -} - -// Run will either return ErrBreakerOpen immediately if the circuit-breaker is -// already open, or it will run the given function and pass along its return -// value. It is safe to call Run concurrently on the same Breaker. -func (b *Breaker) Run(work func() error) error { - state := atomic.LoadUint32(&b.state) - - if state == open { - return ErrBreakerOpen - } - - return b.doWork(state, work) -} - -// Go will either return ErrBreakerOpen immediately if the circuit-breaker is -// already open, or it will run the given function in a separate goroutine. -// If the function is run, Go will return nil immediately, and will *not* return -// the return value of the function. It is safe to call Go concurrently on the -// same Breaker. -func (b *Breaker) Go(work func() error) error { - state := atomic.LoadUint32(&b.state) - - if state == open { - return ErrBreakerOpen - } - - // errcheck complains about ignoring the error return value, but - // that's on purpose; if you want an error from a goroutine you have to - // get it over a channel or something - go b.doWork(state, work) - - return nil -} - -func (b *Breaker) doWork(state uint32, work func() error) error { - var panicValue interface{} - - result := func() error { - defer func() { - panicValue = recover() - }() - return work() - }() - - if result == nil && panicValue == nil && state == closed { - // short-circuit the normal, success path without contending - // on the lock - return nil - } - - // oh well, I guess we have to contend on the lock - b.processResult(result, panicValue) - - if panicValue != nil { - // as close as Go lets us come to a "rethrow" although unfortunately - // we lose the original panicing location - panic(panicValue) - } - - return result -} - -func (b *Breaker) processResult(result error, panicValue interface{}) { - b.lock.Lock() - defer b.lock.Unlock() - - if result == nil && panicValue == nil { - if b.state == halfOpen { - b.successes++ - if b.successes == b.successThreshold { - b.closeBreaker() - } - } - } else { - if b.errors > 0 { - expiry := b.lastError.Add(b.timeout) - if time.Now().After(expiry) { - b.errors = 0 - } - } - - switch b.state { - case closed: - b.errors++ - if b.errors == b.errorThreshold { - b.openBreaker() - } else { - b.lastError = time.Now() - } - case halfOpen: - b.openBreaker() - } - } -} - -func (b *Breaker) openBreaker() { - b.changeState(open) - go b.timer() -} - -func (b *Breaker) closeBreaker() { - b.changeState(closed) -} - -func (b *Breaker) timer() { - time.Sleep(b.timeout) - - b.lock.Lock() - defer b.lock.Unlock() - - b.changeState(halfOpen) -} - -func (b *Breaker) changeState(newState uint32) { - b.errors = 0 - b.successes = 0 - atomic.StoreUint32(&b.state, newState) -} diff --git a/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/breaker_test.go b/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/breaker_test.go deleted file mode 100644 index b41308db6..000000000 --- a/Godeps/_workspace/src/github.com/eapache/go-resiliency/breaker/breaker_test.go +++ /dev/null @@ -1,196 +0,0 @@ -package breaker - -import ( - "errors" - "testing" - "time" -) - -var errSomeError = errors.New("errSomeError") - -func alwaysPanics() error { - panic("foo") -} - -func returnsError() error { - return errSomeError -} - -func returnsSuccess() error { - return nil -} - -func 
TestBreakerErrorExpiry(t *testing.T) { - breaker := New(2, 1, 1*time.Second) - - for i := 0; i < 3; i++ { - if err := breaker.Run(returnsError); err != errSomeError { - t.Error(err) - } - time.Sleep(1 * time.Second) - } - - for i := 0; i < 3; i++ { - if err := breaker.Go(returnsError); err != nil { - t.Error(err) - } - time.Sleep(1 * time.Second) - } -} - -func TestBreakerPanicsCountAsErrors(t *testing.T) { - breaker := New(3, 2, 1*time.Second) - - // three errors opens the breaker - for i := 0; i < 3; i++ { - func() { - defer func() { - val := recover() - if val.(string) != "foo" { - t.Error("incorrect panic") - } - }() - if err := breaker.Run(alwaysPanics); err != nil { - t.Error(err) - } - t.Error("shouldn't get here") - }() - } - - // breaker is open - for i := 0; i < 5; i++ { - if err := breaker.Run(returnsError); err != ErrBreakerOpen { - t.Error(err) - } - } -} - -func TestBreakerStateTransitions(t *testing.T) { - breaker := New(3, 2, 1*time.Second) - - // three errors opens the breaker - for i := 0; i < 3; i++ { - if err := breaker.Run(returnsError); err != errSomeError { - t.Error(err) - } - } - - // breaker is open - for i := 0; i < 5; i++ { - if err := breaker.Run(returnsError); err != ErrBreakerOpen { - t.Error(err) - } - } - - // wait for it to half-close - time.Sleep(2 * time.Second) - // one success works, but is not enough to fully close - if err := breaker.Run(returnsSuccess); err != nil { - t.Error(err) - } - // error works, but re-opens immediately - if err := breaker.Run(returnsError); err != errSomeError { - t.Error(err) - } - // breaker is open - if err := breaker.Run(returnsError); err != ErrBreakerOpen { - t.Error(err) - } - - // wait for it to half-close - time.Sleep(2 * time.Second) - // two successes is enough to close it for good - for i := 0; i < 2; i++ { - if err := breaker.Run(returnsSuccess); err != nil { - t.Error(err) - } - } - // error works - if err := breaker.Run(returnsError); err != errSomeError { - t.Error(err) - } - // breaker is still closed - if err := breaker.Run(returnsSuccess); err != nil { - t.Error(err) - } -} - -func TestBreakerAsyncStateTransitions(t *testing.T) { - breaker := New(3, 2, 1*time.Second) - - // three errors opens the breaker - for i := 0; i < 3; i++ { - if err := breaker.Go(returnsError); err != nil { - t.Error(err) - } - } - - // just enough to yield the scheduler and let the goroutines work off - time.Sleep(1 * time.Millisecond) - - // breaker is open - for i := 0; i < 5; i++ { - if err := breaker.Go(returnsError); err != ErrBreakerOpen { - t.Error(err) - } - } - - // wait for it to half-close - time.Sleep(2 * time.Second) - // one success works, but is not enough to fully close - if err := breaker.Go(returnsSuccess); err != nil { - t.Error(err) - } - // error works, but re-opens immediately - if err := breaker.Go(returnsError); err != nil { - t.Error(err) - } - // just enough to yield the scheduler and let the goroutines work off - time.Sleep(1 * time.Millisecond) - // breaker is open - if err := breaker.Go(returnsError); err != ErrBreakerOpen { - t.Error(err) - } - - // wait for it to half-close - time.Sleep(2 * time.Second) - // two successes is enough to close it for good - for i := 0; i < 2; i++ { - if err := breaker.Go(returnsSuccess); err != nil { - t.Error(err) - } - } - // just enough to yield the scheduler and let the goroutines work off - time.Sleep(1 * time.Millisecond) - // error works - if err := breaker.Go(returnsError); err != nil { - t.Error(err) - } - // just enough to yield the scheduler and let the 
goroutines work off - time.Sleep(1 * time.Millisecond) - // breaker is still closed - if err := breaker.Go(returnsSuccess); err != nil { - t.Error(err) - } -} - -func ExampleBreaker() { - breaker := New(3, 1, 5*time.Second) - - for { - result := breaker.Run(func() error { - // communicate with some external service and - // return an error if the communication failed - return nil - }) - - switch result { - case nil: - // success! - case ErrBreakerOpen: - // our function wasn't run because the breaker was open - default: - // some other error - } - } -} diff --git a/Godeps/_workspace/src/github.com/eapache/queue/.gitignore b/Godeps/_workspace/src/github.com/eapache/queue/.gitignore deleted file mode 100644 index 836562412..000000000 --- a/Godeps/_workspace/src/github.com/eapache/queue/.gitignore +++ /dev/null @@ -1,23 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test diff --git a/Godeps/_workspace/src/github.com/eapache/queue/.travis.yml b/Godeps/_workspace/src/github.com/eapache/queue/.travis.yml deleted file mode 100644 index 235a40a49..000000000 --- a/Godeps/_workspace/src/github.com/eapache/queue/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -sudo: false - -go: - - 1.2 - - 1.3 - - 1.4 diff --git a/Godeps/_workspace/src/github.com/eapache/queue/LICENSE b/Godeps/_workspace/src/github.com/eapache/queue/LICENSE deleted file mode 100644 index d5f36dbca..000000000 --- a/Godeps/_workspace/src/github.com/eapache/queue/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Evan Huus - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
\ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/eapache/queue/README.md b/Godeps/_workspace/src/github.com/eapache/queue/README.md deleted file mode 100644 index 8e782335c..000000000 --- a/Godeps/_workspace/src/github.com/eapache/queue/README.md +++ /dev/null @@ -1,16 +0,0 @@ -Queue -===== - -[![Build Status](https://travis-ci.org/eapache/queue.svg)](https://travis-ci.org/eapache/queue) -[![GoDoc](https://godoc.org/github.com/eapache/queue?status.png)](https://godoc.org/github.com/eapache/queue) -[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) - -A fast Golang queue using a ring-buffer, based on the version suggested by Dariusz Górecki. -Using this instead of other, simpler, queue implementations (slice+append or linked list) provides -substantial memory and time benefits, and fewer GC pauses. - -The queue implemented here is as fast as it is in part because it is *not* thread-safe. - -Follows semantic versioning using https://gopkg.in/ - import from -[`gopkg.in/eapache/queue.v1`](https://gopkg.in/eapache/queue.v1) -for guaranteed API stability. diff --git a/Godeps/_workspace/src/github.com/eapache/queue/queue.go b/Godeps/_workspace/src/github.com/eapache/queue/queue.go deleted file mode 100644 index 2dc8d9395..000000000 --- a/Godeps/_workspace/src/github.com/eapache/queue/queue.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki. -Using this instead of other, simpler, queue implementations (slice+append or linked list) provides -substantial memory and time benefits, and fewer GC pauses. - -The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe. -*/ -package queue - -const minQueueLen = 16 - -// Queue represents a single instance of the queue data structure. -type Queue struct { - buf []interface{} - head, tail, count int -} - -// New constructs and returns a new Queue. -func New() *Queue { - return &Queue{ - buf: make([]interface{}, minQueueLen), - } -} - -// Length returns the number of elements currently stored in the queue. -func (q *Queue) Length() int { - return q.count -} - -// resizes the queue to fit exactly twice its current contents -// this can result in shrinking if the queue is less than half-full -func (q *Queue) resize() { - newBuf := make([]interface{}, q.count*2) - - if q.tail > q.head { - copy(newBuf, q.buf[q.head:q.tail]) - } else { - n := copy(newBuf, q.buf[q.head:]) - copy(newBuf[n:], q.buf[:q.tail]) - } - - q.head = 0 - q.tail = q.count - q.buf = newBuf -} - -// Add puts an element on the end of the queue. -func (q *Queue) Add(elem interface{}) { - if q.count == len(q.buf) { - q.resize() - } - - q.buf[q.tail] = elem - q.tail = (q.tail + 1) % len(q.buf) - q.count++ -} - -// Peek returns the element at the head of the queue. This call panics -// if the queue is empty. -func (q *Queue) Peek() interface{} { - if q.count <= 0 { - panic("queue: Peek() called on empty queue") - } - return q.buf[q.head] -} - -// Get returns the element at index i in the queue. If the index is -// invalid, the call will panic. -func (q *Queue) Get(i int) interface{} { - if i < 0 || i >= q.count { - panic("queue: Get() called with index out of range") - } - return q.buf[(q.head+i)%len(q.buf)] -} - -// Remove removes the element from the front of the queue. If you actually -// want the element, call Peek first. This call panics if the queue is empty. 
-func (q *Queue) Remove() { - if q.count <= 0 { - panic("queue: Remove() called on empty queue") - } - q.buf[q.head] = nil - q.head = (q.head + 1) % len(q.buf) - q.count-- - if len(q.buf) > minQueueLen && q.count*4 == len(q.buf) { - q.resize() - } -} diff --git a/Godeps/_workspace/src/github.com/eapache/queue/queue_test.go b/Godeps/_workspace/src/github.com/eapache/queue/queue_test.go deleted file mode 100644 index f2765c14d..000000000 --- a/Godeps/_workspace/src/github.com/eapache/queue/queue_test.go +++ /dev/null @@ -1,162 +0,0 @@ -package queue - -import "testing" - -func TestQueueSimple(t *testing.T) { - q := New() - - for i := 0; i < minQueueLen; i++ { - q.Add(i) - } - for i := 0; i < minQueueLen; i++ { - if q.Peek().(int) != i { - t.Error("peek", i, "had value", q.Peek()) - } - q.Remove() - } -} - -func TestQueueWrapping(t *testing.T) { - q := New() - - for i := 0; i < minQueueLen; i++ { - q.Add(i) - } - for i := 0; i < 3; i++ { - q.Remove() - q.Add(minQueueLen + i) - } - - for i := 0; i < minQueueLen; i++ { - if q.Peek().(int) != i+3 { - t.Error("peek", i, "had value", q.Peek()) - } - q.Remove() - } -} - -func TestQueueLength(t *testing.T) { - q := New() - - if q.Length() != 0 { - t.Error("empty queue length not 0") - } - - for i := 0; i < 1000; i++ { - q.Add(i) - if q.Length() != i+1 { - t.Error("adding: queue with", i, "elements has length", q.Length()) - } - } - for i := 0; i < 1000; i++ { - q.Remove() - if q.Length() != 1000-i-1 { - t.Error("removing: queue with", 1000-i-i, "elements has length", q.Length()) - } - } -} - -func TestQueueGet(t *testing.T) { - q := New() - - for i := 0; i < 1000; i++ { - q.Add(i) - for j := 0; j < q.Length(); j++ { - if q.Get(j).(int) != j { - t.Errorf("index %d doesn't contain %d", j, j) - } - } - } -} - -func TestQueueGetOutOfRangePanics(t *testing.T) { - q := New() - - q.Add(1) - q.Add(2) - q.Add(3) - - assertPanics(t, "should panic when negative index", func() { - q.Get(-1) - }) - - assertPanics(t, "should panic when index greater than length", func() { - q.Get(4) - }) -} - -func TestQueuePeekOutOfRangePanics(t *testing.T) { - q := New() - - assertPanics(t, "should panic when peeking empty queue", func() { - q.Peek() - }) - - q.Add(1) - q.Remove() - - assertPanics(t, "should panic when peeking emptied queue", func() { - q.Peek() - }) -} - -func TestQueueRemoveOutOfRangePanics(t *testing.T) { - q := New() - - assertPanics(t, "should panic when removing empty queue", func() { - q.Remove() - }) - - q.Add(1) - q.Remove() - - assertPanics(t, "should panic when removing emptied queue", func() { - q.Remove() - }) -} - -func assertPanics(t *testing.T, name string, f func()) { - defer func() { - if r := recover(); r == nil { - t.Errorf("%s: didn't panic as expected", name) - } - }() - - f() -} - -// General warning: Go's benchmark utility (go test -bench .) increases the number of -// iterations until the benchmarks take a reasonable amount of time to run; memory usage -// is *NOT* considered. On my machine, these benchmarks hit around ~1GB before they've had -// enough, but if you have less than that available and start swapping, then all bets are off. 
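Since `Remove` above discards the head without returning it, the intended read pattern is `Peek` first, then `Remove`. A short FIFO walk-through against the vendored API (the stored values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/eapache/queue"
)

func main() {
	q := queue.New()

	// Add appends at the tail of the ring buffer.
	for i := 1; i <= 3; i++ {
		q.Add(i)
	}

	// Drain front-to-back: Peek for the value, Remove to drop it.
	for q.Length() > 0 {
		head := q.Peek().(int) // elements come back as interface{}, so assert the type
		q.Remove()
		fmt.Println("got", head) // prints 1, 2, 3 in order
	}
}
```

As the README above stresses, none of this is thread-safe; callers sharing a queue across goroutines need their own locking.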
- -func BenchmarkQueueSerial(b *testing.B) { - q := New() - for i := 0; i < b.N; i++ { - q.Add(nil) - } - for i := 0; i < b.N; i++ { - q.Peek() - q.Remove() - } -} - -func BenchmarkQueueGet(b *testing.B) { - q := New() - for i := 0; i < b.N; i++ { - q.Add(i) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - q.Get(i) - } -} - -func BenchmarkQueueTickTock(b *testing.B) { - q := New() - for i := 0; i < b.N; i++ { - q.Add(nil) - q.Peek() - q.Remove() - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.gitignore b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.gitignore deleted file mode 100644 index 5f6b48eae..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# temporary symlink for testing -testing/data/symlink diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml deleted file mode 100644 index d062464f2..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -sudo: required -go: - - 1.3.3 - - 1.4.2 - - 1.5.1 - - tip -env: - - GOARCH=amd64 - - GOARCH=386 -script: - - make test - - DOCKER_HOST=tcp://127.0.0.1:2375 make integration -services: - - docker diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS deleted file mode 100644 index c31304faf..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS +++ /dev/null @@ -1,108 +0,0 @@ -# This is the official list of go-dockerclient authors for copyright purposes. - -Abhishek Chanda -Adam Bell-Hanssen -Adrien Kohlbecker -Aldrin Leal -Andreas Jaekle -Andrews Medina -Artem Sidorenko -Andy Goldstein -Ben Marini -Ben McCann -Brendan Fosberry -Brian Lalor -Brian Palmer -Bryan Boreham -Burke Libbey -Carlos Diaz-Padron -Cesar Wong -Cezar Sa Espinola -Cheah Chu Yeow -cheneydeng -Chris Bednarski -CMGS -Craig Jellick -Dan Williams -Daniel, Dao Quang Minh -Daniel Garcia -Darren Shepherd -Dave Choi -David Huie -Dawn Chen -Dinesh Subhraveti -Ed -Elias G. 
Schneevoigt -Erez Horev -Eric Anderson -Ewout Prangsma -Fabio Rehm -Fatih Arslan -Flavia Missi -Francisco Souza -Grégoire Delattre -Guillermo Álvarez Fernández -He Simei -Ivan Mikushin -James Bardin -Jari Kolehmainen -Jason Wilder -Jawher Moussa -Jean-Baptiste Dalido -Jeff Mitchell -Jeffrey Hulten -Jen Andre -Johan Euphrosine -Kamil Domanski -Karan Misra -Kim, Hirokuni -Kyle Allan -Liron Levin -Liu Peng -Lorenz Leutgeb -Lucas Clemente -Lucas Weiblen -Lyon Hill -Mantas Matelis -Martin Sweeney -Máximo Cuadros Ortiz -Michael Schmatz -Michal Fojtik -Mike Dillon -Mrunal Patel -Nick Ethier -Omeid Matten -Orivej Desh -Paul Bellamy -Paul Morie -Paul Weil -Peter Edge -Peter Jihoon Kim -Phil Lu -Philippe Lafoucrière -Rafe Colton -Rob Miller -Robert Williamson -Salvador Gironès -Sam Rijs -Sami Wagiaalla -Samuel Karp -Silas Sewell -Simon Eskildsen -Simon Menke -Skolos -Soulou -Sridhar Ratnakumar -Summer Mousa -Sunjin Lee -Tarsis Azevedo -Tim Schindler -Tobi Knaup -Tonic -ttyh061 -Victor Marmol -Vincenzo Prignano -Wiliam Souza -Ye Yin -Yu, Zou -Yuriy Bogdanov diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE deleted file mode 100644 index 706634474..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE +++ /dev/null @@ -1,6 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -You can find the Docker license at the following link: -https://raw.githubusercontent.com/docker/docker/master/LICENSE diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE deleted file mode 100644 index 4e11de100..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2015, go-dockerclient authors -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/Makefile b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/Makefile deleted file mode 100644 index 4d5d84067..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/Makefile +++ /dev/null @@ -1,51 +0,0 @@ -.PHONY: \ - all \ - vendor \ - lint \ - vet \ - fmt \ - fmtcheck \ - pretest \ - test \ - integration \ - cov \ - clean - -SRCS = $(shell git ls-files '*.go' | grep -v '^external/') -PKGS = ./. ./testing - -all: test - -vendor: - @ go get -v github.com/mjibson/party - party -d external -c -u - -lint: - @ go get -v github.com/golang/lint/golint - $(foreach file,$(SRCS),golint $(file) || exit;) - -vet: - @-go get -v golang.org/x/tools/cmd/vet - $(foreach pkg,$(PKGS),go vet $(pkg);) - -fmt: - gofmt -w $(SRCS) - -fmtcheck: - $(foreach file,$(SRCS),gofmt -d $(file);) - -pretest: lint vet fmtcheck - -test: pretest - $(foreach pkg,$(PKGS),go test $(pkg) || exit;) - -integration: - go test -tags docker_integration -run TestIntegration -v - -cov: - @ go get -v github.com/axw/gocov/gocov - @ go get golang.org/x/tools/cmd/cover - gocov test | gocov report - -clean: - $(foreach pkg,$(PKGS),go clean $(pkg) || exit;) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown deleted file mode 100644 index 1672099f7..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown +++ /dev/null @@ -1,106 +0,0 @@ -# go-dockerclient - -[![Drone](https://drone.io/github.com/fsouza/go-dockerclient/status.png)](https://drone.io/github.com/fsouza/go-dockerclient/latest) -[![Travis](https://img.shields.io/travis/fsouza/go-dockerclient.svg?style=flat-square)](https://travis-ci.org/fsouza/go-dockerclient) -[![GoDoc](https://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](https://godoc.org/github.com/fsouza/go-dockerclient) - -This package presents a client for the Docker remote API. It also provides -support for the extensions in the [Swarm API](https://docs.docker.com/swarm/api/swarm-api/). - -This package also provides support for docker's network API, which is a simple -passthrough to the libnetwork remote API. Note that docker's network API is -only available in docker 1.8 and above, and only enabled in docker if -DOCKER_EXPERIMENTAL is defined during the docker build process. - -For more details, check the [remote API documentation](http://docs.docker.com/engine/reference/api/docker_remote_api/). - -## Vendoring - -If you are having issues with Go 1.5 and have `GO15VENDOREXPERIMENT` set with an application that has go-dockerclient vendored, -please update your vendoring of go-dockerclient :) We recently moved the `vendor` directory to `external` so that go-dockerclient -is compatible with this configuration. See [338](https://github.com/fsouza/go-dockerclient/issues/338) and [339](https://github.com/fsouza/go-dockerclient/pull/339) -for details. 
- -## Example - -```go -package main - -import ( - "fmt" - - "github.com/fsouza/go-dockerclient" -) - -func main() { - endpoint := "unix:///var/run/docker.sock" - client, _ := docker.NewClient(endpoint) - imgs, _ := client.ListImages(docker.ListImagesOptions{All: false}) - for _, img := range imgs { - fmt.Println("ID: ", img.ID) - fmt.Println("RepoTags: ", img.RepoTags) - fmt.Println("Created: ", img.Created) - fmt.Println("Size: ", img.Size) - fmt.Println("VirtualSize: ", img.VirtualSize) - fmt.Println("ParentId: ", img.ParentID) - } -} -``` - -## Using with TLS - -In order to instantiate the client for a TLS-enabled daemon, you should use NewTLSClient, passing the endpoint and path for key and certificates as parameters. - -```go -package main - -import ( - "fmt" - - "github.com/fsouza/go-dockerclient" -) - -func main() { - endpoint := "tcp://[ip]:[port]" - path := os.Getenv("DOCKER_CERT_PATH") - ca := fmt.Sprintf("%s/ca.pem", path) - cert := fmt.Sprintf("%s/cert.pem", path) - key := fmt.Sprintf("%s/key.pem", path) - client, _ := docker.NewTLSClient(endpoint, cert, key, ca) - // use client -} -``` - -If using [docker-machine](https://docs.docker.com/machine/), or another application that exports environment variables -`DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH`, you can use NewClientFromEnv. - - -```go -package main - -import ( - "fmt" - - "github.com/fsouza/go-dockerclient" -) - -func main() { - client, _ := docker.NewClientFromEnv() - // use client -} -``` - -See the documentation for more details. - -## Developing - -All development commands can be seen in the [Makefile](Makefile). - -Commited code must pass: - -* [golint](https://github.com/golang/lint) -* [go vet](https://godoc.org/golang.org/x/tools/cmd/vet) -* [gofmt](https://golang.org/cmd/gofmt) -* [go test](https://golang.org/cmd/go/#hdr-Test_packages) - -Running `make test` will check all of these. If your editor does not automatically call gofmt, `make fmt` will format all go files in this repository. diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.go deleted file mode 100644 index 30e3af3eb..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "os" - "path" - "strings" -) - -// ErrCannotParseDockercfg is the error returned by NewAuthConfigurations when the dockercfg cannot be parsed. -var ErrCannotParseDockercfg = errors.New("Failed to read authentication from dockercfg") - -// AuthConfiguration represents authentication options to use in the PushImage -// method. It represents the authentication in the Docker index server. -type AuthConfiguration struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Email string `json:"email,omitempty"` - ServerAddress string `json:"serveraddress,omitempty"` -} - -// AuthConfigurations represents authentication options to use for the -// PushImage method accommodating the new X-Registry-Config header -type AuthConfigurations struct { - Configs map[string]AuthConfiguration `json:"configs"` -} - -// AuthConfigurations119 is used to serialize a set of AuthConfigurations -// for Docker API >= 1.19. 
-type AuthConfigurations119 map[string]AuthConfiguration - -// dockerConfig represents a registry authentation configuration from the -// .dockercfg file. -type dockerConfig struct { - Auth string `json:"auth"` - Email string `json:"email"` -} - -// NewAuthConfigurationsFromDockerCfg returns AuthConfigurations from the -// ~/.dockercfg file. -func NewAuthConfigurationsFromDockerCfg() (*AuthConfigurations, error) { - var r io.Reader - var err error - p := path.Join(os.Getenv("HOME"), ".docker", "config.json") - r, err = os.Open(p) - if err != nil { - p := path.Join(os.Getenv("HOME"), ".dockercfg") - r, err = os.Open(p) - if err != nil { - return nil, err - } - } - return NewAuthConfigurations(r) -} - -// NewAuthConfigurations returns AuthConfigurations from a JSON encoded string in the -// same format as the .dockercfg file. -func NewAuthConfigurations(r io.Reader) (*AuthConfigurations, error) { - var auth *AuthConfigurations - confs, err := parseDockerConfig(r) - if err != nil { - return nil, err - } - auth, err = authConfigs(confs) - if err != nil { - return nil, err - } - return auth, nil -} - -func parseDockerConfig(r io.Reader) (map[string]dockerConfig, error) { - buf := new(bytes.Buffer) - buf.ReadFrom(r) - byteData := buf.Bytes() - - var confsWrapper map[string]map[string]dockerConfig - if err := json.Unmarshal(byteData, &confsWrapper); err == nil { - if confs, ok := confsWrapper["auths"]; ok { - return confs, nil - } - } - - var confs map[string]dockerConfig - if err := json.Unmarshal(byteData, &confs); err != nil { - return nil, err - } - return confs, nil -} - -// authConfigs converts a dockerConfigs map to a AuthConfigurations object. -func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) { - c := &AuthConfigurations{ - Configs: make(map[string]AuthConfiguration), - } - for reg, conf := range confs { - data, err := base64.StdEncoding.DecodeString(conf.Auth) - if err != nil { - return nil, err - } - userpass := strings.Split(string(data), ":") - if len(userpass) != 2 { - return nil, ErrCannotParseDockercfg - } - c.Configs[reg] = AuthConfiguration{ - Email: conf.Email, - Username: userpass[0], - Password: userpass[1], - ServerAddress: reg, - } - } - return c, nil -} - -// AuthCheck validates the given credentials. It returns nil if successful. -// -// See https://goo.gl/m2SleN for more details. -func (c *Client) AuthCheck(conf *AuthConfiguration) error { - if conf == nil { - return fmt.Errorf("conf is nil") - } - resp, err := c.do("POST", "/auth", doOptions{data: conf}) - if err != nil { - return err - } - resp.Body.Close() - return nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go deleted file mode 100644 index d133594d4..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import "fmt" - -// ChangeType is a type for constants indicating the type of change -// in a container -type ChangeType int - -const ( - // ChangeModify is the ChangeType for container modifications - ChangeModify ChangeType = iota - - // ChangeAdd is the ChangeType for additions to a container - ChangeAdd - - // ChangeDelete is the ChangeType for deletions from a container - ChangeDelete -) - -// Change represents a change in a container. 
-// -// See https://goo.gl/9GsTIF for more details. -type Change struct { - Path string - Kind ChangeType -} - -func (change *Change) String() string { - var kind string - switch change.Kind { - case ChangeModify: - kind = "C" - case ChangeAdd: - kind = "A" - case ChangeDelete: - kind = "D" - } - return fmt.Sprintf("%s %s", kind, change.Path) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go deleted file mode 100644 index f7a3905d0..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go +++ /dev/null @@ -1,882 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package docker provides a client for the Docker remote API. -// -// See https://goo.gl/G3plxW for more details on the remote API. -package docker - -import ( - "bufio" - "bytes" - "crypto/tls" - "crypto/x509" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/http/httputil" - "net/url" - "os" - "path/filepath" - "reflect" - "runtime" - "strconv" - "strings" - "time" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy" - "github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp" -) - -const userAgent = "go-dockerclient" - -var ( - // ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL. - ErrInvalidEndpoint = errors.New("invalid endpoint") - - // ErrConnectionRefused is returned when the client cannot connect to the given endpoint. - ErrConnectionRefused = errors.New("cannot connect to Docker endpoint") - - apiVersion112, _ = NewAPIVersion("1.12") - - apiVersion119, _ = NewAPIVersion("1.19") -) - -// APIVersion is an internal representation of a version of the Remote API. -type APIVersion []int - -// NewAPIVersion returns an instance of APIVersion for the given string. -// -// The given string must be in the form <major>.<minor>.<patch>, where <major>, -// <minor> and <patch> are integer numbers. -func NewAPIVersion(input string) (APIVersion, error) { - if !strings.Contains(input, ".") { - return nil, fmt.Errorf("Unable to parse version %q", input) - } - arr := strings.Split(input, ".") - ret := make(APIVersion, len(arr)) - var err error - for i, val := range arr { - ret[i], err = strconv.Atoi(val) - if err != nil { - return nil, fmt.Errorf("Unable to parse version %q: %q is not an integer", input, val) - } - } - return ret, nil -} - -func (version APIVersion) String() string { - var str string - for i, val := range version { - str += strconv.Itoa(val) - if i < len(version)-1 { - str += "."
- } - } - return str -} - -// LessThan is a function for comparing APIVersion structs -func (version APIVersion) LessThan(other APIVersion) bool { - return version.compare(other) < 0 -} - -// LessThanOrEqualTo is a function for comparing APIVersion structs -func (version APIVersion) LessThanOrEqualTo(other APIVersion) bool { - return version.compare(other) <= 0 -} - -// GreaterThan is a function for comparing APIVersion structs -func (version APIVersion) GreaterThan(other APIVersion) bool { - return version.compare(other) > 0 -} - -// GreaterThanOrEqualTo is a function for comparing APIVersion structs -func (version APIVersion) GreaterThanOrEqualTo(other APIVersion) bool { - return version.compare(other) >= 0 -} - -func (version APIVersion) compare(other APIVersion) int { - for i, v := range version { - if i <= len(other)-1 { - otherVersion := other[i] - - if v < otherVersion { - return -1 - } else if v > otherVersion { - return 1 - } - } - } - if len(version) > len(other) { - return 1 - } - if len(version) < len(other) { - return -1 - } - return 0 -} - -// Client is the basic type of this package. It provides methods for -// interaction with the API. -type Client struct { - SkipServerVersionCheck bool - HTTPClient *http.Client - TLSConfig *tls.Config - Dialer *net.Dialer - - endpoint string - endpointURL *url.URL - eventMonitor *eventMonitoringState - requestedAPIVersion APIVersion - serverAPIVersion APIVersion - expectedAPIVersion APIVersion - unixHTTPClient *http.Client -} - -// NewClient returns a Client instance ready for communication with the given -// server endpoint. It will use the latest remote API version available in the -// server. -func NewClient(endpoint string) (*Client, error) { - client, err := NewVersionedClient(endpoint, "") - if err != nil { - return nil, err - } - client.SkipServerVersionCheck = true - return client, nil -} - -// NewTLSClient returns a Client instance ready for TLS communications with the givens -// server endpoint, key and certificates . It will use the latest remote API version -// available in the server. -func NewTLSClient(endpoint string, cert, key, ca string) (*Client, error) { - client, err := NewVersionedTLSClient(endpoint, cert, key, ca, "") - if err != nil { - return nil, err - } - client.SkipServerVersionCheck = true - return client, nil -} - -// NewTLSClientFromBytes returns a Client instance ready for TLS communications with the givens -// server endpoint, key and certificates (passed inline to the function as opposed to being -// read from a local file). It will use the latest remote API version available in the server. -func NewTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte) (*Client, error) { - client, err := NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, "") - if err != nil { - return nil, err - } - client.SkipServerVersionCheck = true - return client, nil -} - -// NewVersionedClient returns a Client instance ready for communication with -// the given server endpoint, using a specific remote API version. 
-func NewVersionedClient(endpoint string, apiVersionString string) (*Client, error) { - u, err := parseEndpoint(endpoint, false) - if err != nil { - return nil, err - } - var requestedAPIVersion APIVersion - if strings.Contains(apiVersionString, ".") { - requestedAPIVersion, err = NewAPIVersion(apiVersionString) - if err != nil { - return nil, err - } - } - return &Client{ - HTTPClient: cleanhttp.DefaultClient(), - Dialer: &net.Dialer{}, - endpoint: endpoint, - endpointURL: u, - eventMonitor: new(eventMonitoringState), - requestedAPIVersion: requestedAPIVersion, - }, nil -} - -// NewVersionnedTLSClient has been DEPRECATED, please use NewVersionedTLSClient. -func NewVersionnedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) { - return NewVersionedTLSClient(endpoint, cert, key, ca, apiVersionString) -} - -// NewVersionedTLSClient returns a Client instance ready for TLS communications with the given -// server endpoint, key and certificates, using a specific remote API version. -func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) { - certPEMBlock, err := ioutil.ReadFile(cert) - if err != nil { - return nil, err - } - keyPEMBlock, err := ioutil.ReadFile(key) - if err != nil { - return nil, err - } - caPEMCert, err := ioutil.ReadFile(ca) - if err != nil { - return nil, err - } - return NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, apiVersionString) -} - -// NewClientFromEnv returns a Client instance ready for communication created from -// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH. -// -// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68. -// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7. -func NewClientFromEnv() (*Client, error) { - client, err := NewVersionedClientFromEnv("") - if err != nil { - return nil, err - } - client.SkipServerVersionCheck = true - return client, nil -} - -// NewVersionedClientFromEnv returns a Client instance ready for TLS communications created from -// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH, -// and using a specific remote API version. -// -// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68. -// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
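A minimal sketch of constructing a version-pinned client with NewVersionedClient (the unix-socket endpoint is an assumption, not taken from this file):

	client, err := docker.NewVersionedClient("unix:///var/run/docker.sock", "1.19")
	if err != nil {
		log.Fatal(err)
	}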
-func NewVersionedClientFromEnv(apiVersionString string) (*Client, error) { - dockerEnv, err := getDockerEnv() - if err != nil { - return nil, err - } - dockerHost := dockerEnv.dockerHost - if dockerEnv.dockerTLSVerify { - parts := strings.SplitN(dockerEnv.dockerHost, "://", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("could not split %s into two parts by ://", dockerHost) - } - cert := filepath.Join(dockerEnv.dockerCertPath, "cert.pem") - key := filepath.Join(dockerEnv.dockerCertPath, "key.pem") - ca := filepath.Join(dockerEnv.dockerCertPath, "ca.pem") - return NewVersionedTLSClient(dockerEnv.dockerHost, cert, key, ca, apiVersionString) - } - return NewVersionedClient(dockerEnv.dockerHost, apiVersionString) -} - -// NewVersionedTLSClientFromBytes returns a Client instance ready for TLS communications with the given -// server endpoint, key and certificates (passed inline to the function as opposed to being -// read from a local file), using a specific remote API version. -func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte, apiVersionString string) (*Client, error) { - u, err := parseEndpoint(endpoint, true) - if err != nil { - return nil, err - } - var requestedAPIVersion APIVersion - if strings.Contains(apiVersionString, ".") { - requestedAPIVersion, err = NewAPIVersion(apiVersionString) - if err != nil { - return nil, err - } - } - if certPEMBlock == nil || keyPEMBlock == nil { - return nil, errors.New("Both cert and key are required") - } - tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) - if err != nil { - return nil, err - } - tlsConfig := &tls.Config{Certificates: []tls.Certificate{tlsCert}} - if caPEMCert == nil { - tlsConfig.InsecureSkipVerify = true - } else { - caPool := x509.NewCertPool() - if !caPool.AppendCertsFromPEM(caPEMCert) { - return nil, errors.New("Could not add RootCA pem") - } - tlsConfig.RootCAs = caPool - } - tr := cleanhttp.DefaultTransport() - tr.TLSClientConfig = tlsConfig - if err != nil { - return nil, err - } - return &Client{ - HTTPClient: &http.Client{Transport: tr}, - TLSConfig: tlsConfig, - Dialer: &net.Dialer{}, - endpoint: endpoint, - endpointURL: u, - eventMonitor: new(eventMonitoringState), - requestedAPIVersion: requestedAPIVersion, - }, nil -} - -func (c *Client) checkAPIVersion() error { - serverAPIVersionString, err := c.getServerAPIVersionString() - if err != nil { - return err - } - c.serverAPIVersion, err = NewAPIVersion(serverAPIVersionString) - if err != nil { - return err - } - if c.requestedAPIVersion == nil { - c.expectedAPIVersion = c.serverAPIVersion - } else { - c.expectedAPIVersion = c.requestedAPIVersion - } - return nil -} - -// Endpoint returns the current endpoint. It's useful for getting the endpoint -// when using functions that get this data from the environment (like -// NewClientFromEnv). -func (c *Client) Endpoint() string { - return c.endpoint -} - -// Ping pings the docker server. -// -// See https://goo.gl/kQCfJj for more details.
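A sketch of the environment-driven constructor together with Ping (a hypothetical setup, assuming DOCKER_HOST and, if TLS verification is on, DOCKER_CERT_PATH are set in the environment):

	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	if err := client.Ping(); err != nil {
		log.Fatalf("docker daemon unreachable: %v", err)
	}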
-func (c *Client) Ping() error { - path := "/_ping" - resp, err := c.do("GET", path, doOptions{}) - if err != nil { - return err - } - if resp.StatusCode != http.StatusOK { - return newError(resp) - } - resp.Body.Close() - return nil -} - -func (c *Client) getServerAPIVersionString() (version string, err error) { - resp, err := c.do("GET", "/version", doOptions{}) - if err != nil { - return "", err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("Received unexpected status %d while trying to retrieve the server version", resp.StatusCode) - } - var versionResponse map[string]interface{} - if err := json.NewDecoder(resp.Body).Decode(&versionResponse); err != nil { - return "", err - } - if version, ok := (versionResponse["ApiVersion"]).(string); ok { - return version, nil - } - return "", nil -} - -type doOptions struct { - data interface{} - forceJSON bool - headers map[string]string -} - -func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, error) { - var params io.Reader - if doOptions.data != nil || doOptions.forceJSON { - buf, err := json.Marshal(doOptions.data) - if err != nil { - return nil, err - } - params = bytes.NewBuffer(buf) - } - if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil { - err := c.checkAPIVersion() - if err != nil { - return nil, err - } - } - httpClient := c.HTTPClient - protocol := c.endpointURL.Scheme - var u string - if protocol == "unix" { - httpClient = c.unixClient() - u = c.getFakeUnixURL(path) - } else { - u = c.getURL(path) - } - req, err := http.NewRequest(method, u, params) - if err != nil { - return nil, err - } - req.Header.Set("User-Agent", userAgent) - if doOptions.data != nil { - req.Header.Set("Content-Type", "application/json") - } else if method == "POST" { - req.Header.Set("Content-Type", "plain/text") - } - - for k, v := range doOptions.headers { - req.Header.Set(k, v) - } - resp, err := httpClient.Do(req) - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return nil, ErrConnectionRefused - } - return nil, err - } - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - return nil, newError(resp) - } - return resp, nil -} - -type streamOptions struct { - setRawTerminal bool - rawJSONStream bool - useJSONDecoder bool - headers map[string]string - in io.Reader - stdout io.Writer - stderr io.Writer - // timeout is the inital connection timeout - timeout time.Duration -} - -func (c *Client) stream(method, path string, streamOptions streamOptions) error { - if (method == "POST" || method == "PUT") && streamOptions.in == nil { - streamOptions.in = bytes.NewReader(nil) - } - if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil { - err := c.checkAPIVersion() - if err != nil { - return err - } - } - req, err := http.NewRequest(method, c.getURL(path), streamOptions.in) - if err != nil { - return err - } - req.Header.Set("User-Agent", userAgent) - if method == "POST" { - req.Header.Set("Content-Type", "plain/text") - } - for key, val := range streamOptions.headers { - req.Header.Set(key, val) - } - var resp *http.Response - protocol := c.endpointURL.Scheme - address := c.endpointURL.Path - if streamOptions.stdout == nil { - streamOptions.stdout = ioutil.Discard - } - if streamOptions.stderr == nil { - streamOptions.stderr = ioutil.Discard - } - if protocol == "unix" { - dial, err := c.Dialer.Dial(protocol, address) - if err != nil { - return err - } - defer dial.Close() - breader := 
bufio.NewReader(dial) - err = req.Write(dial) - if err != nil { - return err - } - - // ReadResponse may hang if server does not reply - if streamOptions.timeout > 0 { - dial.SetDeadline(time.Now().Add(streamOptions.timeout)) - } - - if resp, err = http.ReadResponse(breader, req); err != nil { - // Cancel timeout for future I/O operations - if streamOptions.timeout > 0 { - dial.SetDeadline(time.Time{}) - } - if strings.Contains(err.Error(), "connection refused") { - return ErrConnectionRefused - } - return err - } - } else { - if resp, err = c.HTTPClient.Do(req); err != nil { - if strings.Contains(err.Error(), "connection refused") { - return ErrConnectionRefused - } - return err - } - } - defer resp.Body.Close() - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - return newError(resp) - } - if streamOptions.useJSONDecoder || resp.Header.Get("Content-Type") == "application/json" { - // if we want to get raw json stream, just copy it back to output - // without decoding it - if streamOptions.rawJSONStream { - _, err = io.Copy(streamOptions.stdout, resp.Body) - return err - } - dec := json.NewDecoder(resp.Body) - for { - var m jsonMessage - if err := dec.Decode(&m); err == io.EOF { - break - } else if err != nil { - return err - } - if m.Stream != "" { - fmt.Fprint(streamOptions.stdout, m.Stream) - } else if m.Progress != "" { - fmt.Fprintf(streamOptions.stdout, "%s %s\r", m.Status, m.Progress) - } else if m.Error != "" { - return errors.New(m.Error) - } - if m.Status != "" { - fmt.Fprintln(streamOptions.stdout, m.Status) - } - } - } else { - if streamOptions.setRawTerminal { - _, err = io.Copy(streamOptions.stdout, resp.Body) - } else { - _, err = stdcopy.StdCopy(streamOptions.stdout, streamOptions.stderr, resp.Body) - } - return err - } - return nil -} - -type hijackOptions struct { - success chan struct{} - setRawTerminal bool - in io.Reader - stdout io.Writer - stderr io.Writer - data interface{} -} - -func (c *Client) hijack(method, path string, hijackOptions hijackOptions) error { - if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil { - err := c.checkAPIVersion() - if err != nil { - return err - } - } - var params io.Reader - if hijackOptions.data != nil { - buf, err := json.Marshal(hijackOptions.data) - if err != nil { - return err - } - params = bytes.NewBuffer(buf) - } - req, err := http.NewRequest(method, c.getURL(path), params) - if err != nil { - return err - } - req.Header.Set("Content-Type", "plain/text") - req.Header.Set("Connection", "Upgrade") - req.Header.Set("Upgrade", "tcp") - protocol := c.endpointURL.Scheme - address := c.endpointURL.Path - if protocol != "unix" { - protocol = "tcp" - address = c.endpointURL.Host - } - var dial net.Conn - if c.TLSConfig != nil && protocol != "unix" { - dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig) - if err != nil { - return err - } - } else { - dial, err = c.Dialer.Dial(protocol, address) - if err != nil { - return err - } - } - clientconn := httputil.NewClientConn(dial, nil) - defer clientconn.Close() - clientconn.Do(req) - if hijackOptions.success != nil { - hijackOptions.success <- struct{}{} - <-hijackOptions.success - } - rwc, br := clientconn.Hijack() - defer rwc.Close() - errChanOut := make(chan error, 1) - errChanIn := make(chan error, 1) - if hijackOptions.stdout == nil && hijackOptions.stderr == nil { - close(errChanOut) - } else { - // Only copy if hijackOptions.stdout and/or hijackOptions.stderr is actually set.
- // Otherwise, if the only stream you care about is stdin, your attach session - // will "hang" until the container terminates, even though you're not reading - // stdout/stderr - if hijackOptions.stdout == nil { - hijackOptions.stdout = ioutil.Discard - } - if hijackOptions.stderr == nil { - hijackOptions.stderr = ioutil.Discard - } - - go func() { - defer func() { - if hijackOptions.in != nil { - if closer, ok := hijackOptions.in.(io.Closer); ok { - closer.Close() - } - errChanIn <- nil - } - }() - - var err error - if hijackOptions.setRawTerminal { - _, err = io.Copy(hijackOptions.stdout, br) - } else { - _, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br) - } - errChanOut <- err - }() - } - go func() { - var err error - if hijackOptions.in != nil { - _, err = io.Copy(rwc, hijackOptions.in) - } - errChanIn <- err - rwc.(interface { - CloseWrite() error - }).CloseWrite() - }() - errIn := <-errChanIn - errOut := <-errChanOut - if errIn != nil { - return errIn - } - return errOut -} - -func (c *Client) getURL(path string) string { - urlStr := strings.TrimRight(c.endpointURL.String(), "/") - if c.endpointURL.Scheme == "unix" { - urlStr = "" - } - if c.requestedAPIVersion != nil { - return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path) - } - return fmt.Sprintf("%s%s", urlStr, path) -} - -// getFakeUnixURL returns the URL needed to make an HTTP request over a UNIX -// domain socket to the given path. -func (c *Client) getFakeUnixURL(path string) string { - u := *c.endpointURL // Copy. - - // Override URL so that net/http will not complain. - u.Scheme = "http" - u.Host = "unix.sock" // Doesn't matter what this is - it's not used. - u.Path = "" - urlStr := strings.TrimRight(u.String(), "/") - if c.requestedAPIVersion != nil { - return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path) - } - return fmt.Sprintf("%s%s", urlStr, path) -} - -func (c *Client) unixClient() *http.Client { - if c.unixHTTPClient != nil { - return c.unixHTTPClient - } - socketPath := c.endpointURL.Path - c.unixHTTPClient = &http.Client{ - Transport: &http.Transport{ - Dial: func(network, addr string) (net.Conn, error) { - return c.Dialer.Dial("unix", socketPath) - }, - }, - } - return c.unixHTTPClient -} - -type jsonMessage struct { - Status string `json:"status,omitempty"` - Progress string `json:"progress,omitempty"` - Error string `json:"error,omitempty"` - Stream string `json:"stream,omitempty"` -} - -func queryString(opts interface{}) string { - if opts == nil { - return "" - } - value := reflect.ValueOf(opts) - if value.Kind() == reflect.Ptr { - value = value.Elem() - } - if value.Kind() != reflect.Struct { - return "" - } - items := url.Values(map[string][]string{}) - for i := 0; i < value.NumField(); i++ { - field := value.Type().Field(i) - if field.PkgPath != "" { - continue - } - key := field.Tag.Get("qs") - if key == "" { - key = strings.ToLower(field.Name) - } else if key == "-" { - continue - } - addQueryStringValue(items, key, value.Field(i)) - } - return items.Encode() -} - -func addQueryStringValue(items url.Values, key string, v reflect.Value) { - switch v.Kind() { - case reflect.Bool: - if v.Bool() { - items.Add(key, "1") - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if v.Int() > 0 { - items.Add(key, strconv.FormatInt(v.Int(), 10)) - } - case reflect.Float32, reflect.Float64: - if v.Float() > 0 { - items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64)) - } - case reflect.String: - if v.String() != "" { - 
items.Add(key, v.String()) - } - case reflect.Ptr: - if !v.IsNil() { - if b, err := json.Marshal(v.Interface()); err == nil { - items.Add(key, string(b)) - } - } - case reflect.Map: - if len(v.MapKeys()) > 0 { - if b, err := json.Marshal(v.Interface()); err == nil { - items.Add(key, string(b)) - } - } - case reflect.Array, reflect.Slice: - vLen := v.Len() - if vLen > 0 { - for i := 0; i < vLen; i++ { - addQueryStringValue(items, key, v.Index(i)) - } - } - } -} - -// Error represents failures in the API. It represents a failure from the API. -type Error struct { - Status int - Message string -} - -func newError(resp *http.Response) *Error { - defer resp.Body.Close() - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return &Error{Status: resp.StatusCode, Message: fmt.Sprintf("cannot read body, err: %v", err)} - } - return &Error{Status: resp.StatusCode, Message: string(data)} -} - -func (e *Error) Error() string { - return fmt.Sprintf("API error (%d): %s", e.Status, e.Message) -} - -func parseEndpoint(endpoint string, tls bool) (*url.URL, error) { - u, err := url.Parse(endpoint) - if err != nil { - return nil, ErrInvalidEndpoint - } - if tls { - u.Scheme = "https" - } - switch u.Scheme { - case "unix": - return u, nil - case "http", "https", "tcp": - _, port, err := net.SplitHostPort(u.Host) - if err != nil { - if e, ok := err.(*net.AddrError); ok { - if e.Err == "missing port in address" { - return u, nil - } - } - return nil, ErrInvalidEndpoint - } - number, err := strconv.ParseInt(port, 10, 64) - if err == nil && number > 0 && number < 65536 { - if u.Scheme == "tcp" { - if tls { - u.Scheme = "https" - } else { - u.Scheme = "http" - } - } - return u, nil - } - return nil, ErrInvalidEndpoint - default: - return nil, ErrInvalidEndpoint - } -} - -type dockerEnv struct { - dockerHost string - dockerTLSVerify bool - dockerCertPath string -} - -func getDockerEnv() (*dockerEnv, error) { - dockerHost := os.Getenv("DOCKER_HOST") - var err error - if dockerHost == "" { - dockerHost, err = DefaultDockerHost() - if err != nil { - return nil, err - } - } - dockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY") != "" - var dockerCertPath string - if dockerTLSVerify { - dockerCertPath = os.Getenv("DOCKER_CERT_PATH") - if dockerCertPath == "" { - home := homedir.Get() - if home == "" { - return nil, errors.New("environment variable HOME must be set if DOCKER_CERT_PATH is not set") - } - dockerCertPath = filepath.Join(home, ".docker") - dockerCertPath, err = filepath.Abs(dockerCertPath) - if err != nil { - return nil, err - } - } - } - return &dockerEnv{ - dockerHost: dockerHost, - dockerTLSVerify: dockerTLSVerify, - dockerCertPath: dockerCertPath, - }, nil -} - -// DefaultDockerHost returns the default docker socket for the current OS -func DefaultDockerHost() (string, error) { - var defaultHost string - if runtime.GOOS == "windows" { - // If we do not have a host, default to TCP socket on Windows - defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort) - } else { - // If we do not have a host, default to unix socket - defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket) - } - return opts.ValidateHost(defaultHost) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go deleted file mode 100644 index faf12632f..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go +++ /dev/null @@ -1,1141 +0,0 @@ -// Copyright 2015 go-dockerclient 
authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -// ErrContainerAlreadyExists is the error returned by CreateContainer when the -// container already exists. -var ErrContainerAlreadyExists = errors.New("container already exists") - -// ListContainersOptions specify parameters to the ListContainers function. -// -// See https://goo.gl/47a6tO for more details. -type ListContainersOptions struct { - All bool - Size bool - Limit int - Since string - Before string - Filters map[string][]string -} - -// APIPort is a type that represents a port mapping returned by the Docker API -type APIPort struct { - PrivatePort int64 `json:"PrivatePort,omitempty" yaml:"PrivatePort,omitempty"` - PublicPort int64 `json:"PublicPort,omitempty" yaml:"PublicPort,omitempty"` - Type string `json:"Type,omitempty" yaml:"Type,omitempty"` - IP string `json:"IP,omitempty" yaml:"IP,omitempty"` -} - -// APIContainers represents each container in the list returned by -// ListContainers. -type APIContainers struct { - ID string `json:"Id" yaml:"Id"` - Image string `json:"Image,omitempty" yaml:"Image,omitempty"` - Command string `json:"Command,omitempty" yaml:"Command,omitempty"` - Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"` - Status string `json:"Status,omitempty" yaml:"Status,omitempty"` - Ports []APIPort `json:"Ports,omitempty" yaml:"Ports,omitempty"` - SizeRw int64 `json:"SizeRw,omitempty" yaml:"SizeRw,omitempty"` - SizeRootFs int64 `json:"SizeRootFs,omitempty" yaml:"SizeRootFs,omitempty"` - Names []string `json:"Names,omitempty" yaml:"Names,omitempty"` - Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"` -} - -// ListContainers returns a slice of containers matching the given criteria. -// -// See https://goo.gl/47a6tO for more details. -func (c *Client) ListContainers(opts ListContainersOptions) ([]APIContainers, error) { - path := "/containers/json?" + queryString(opts) - resp, err := c.do("GET", path, doOptions{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var containers []APIContainers - if err := json.NewDecoder(resp.Body).Decode(&containers); err != nil { - return nil, err - } - return containers, nil -} - -// Port represents the port number and the protocol, in the form -// <number>/<protocol>. For example: 80/tcp. -type Port string - -// Port returns the number of the port. -func (p Port) Port() string { - return strings.Split(string(p), "/")[0] -} - -// Proto returns the name of the protocol. -func (p Port) Proto() string { - parts := strings.Split(string(p), "/") - if len(parts) == 1 { - return "tcp" - } - return parts[1] -} - -// State represents the state of a container.
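A quick sketch of the Port helpers defined above (values are illustrative):

	p := docker.Port("80/tcp")
	fmt.Println(p.Port())  // "80"
	fmt.Println(p.Proto()) // "tcp"
	fmt.Println(docker.Port("6379").Proto()) // "tcp" (protocol defaults to tcp)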
-type State struct { - Running bool `json:"Running,omitempty" yaml:"Running,omitempty"` - Paused bool `json:"Paused,omitempty" yaml:"Paused,omitempty"` - Restarting bool `json:"Restarting,omitempty" yaml:"Restarting,omitempty"` - OOMKilled bool `json:"OOMKilled,omitempty" yaml:"OOMKilled,omitempty"` - Pid int `json:"Pid,omitempty" yaml:"Pid,omitempty"` - ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"` - Error string `json:"Error,omitempty" yaml:"Error,omitempty"` - StartedAt time.Time `json:"StartedAt,omitempty" yaml:"StartedAt,omitempty"` - FinishedAt time.Time `json:"FinishedAt,omitempty" yaml:"FinishedAt,omitempty"` -} - -// String returns the string representation of a state. -func (s *State) String() string { - if s.Running { - if s.Paused { - return "paused" - } - return fmt.Sprintf("Up %s", time.Now().UTC().Sub(s.StartedAt)) - } - return fmt.Sprintf("Exit %d", s.ExitCode) -} - -// PortBinding represents the host/container port mapping as returned in the -// `docker inspect` json -type PortBinding struct { - HostIP string `json:"HostIP,omitempty" yaml:"HostIP,omitempty"` - HostPort string `json:"HostPort,omitempty" yaml:"HostPort,omitempty"` -} - -// PortMapping represents a deprecated field in the `docker inspect` output, -// and its value as found in NetworkSettings should always be nil -type PortMapping map[string]string - -// NetworkSettings contains network-related information about a container -type NetworkSettings struct { - IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty"` - IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty"` - MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"` - Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty"` - Bridge string `json:"Bridge,omitempty" yaml:"Bridge,omitempty"` - PortMapping map[string]PortMapping `json:"PortMapping,omitempty" yaml:"PortMapping,omitempty"` - Ports map[Port][]PortBinding `json:"Ports,omitempty" yaml:"Ports,omitempty"` - NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty"` - EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty"` - SandboxKey string `json:"SandboxKey,omitempty" yaml:"SandboxKey,omitempty"` - GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty"` - GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty"` - IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty"` - LinkLocalIPv6Address string `json:"LinkLocalIPv6Address,omitempty" yaml:"LinkLocalIPv6Address,omitempty"` - LinkLocalIPv6PrefixLen int `json:"LinkLocalIPv6PrefixLen,omitempty" yaml:"LinkLocalIPv6PrefixLen,omitempty"` - SecondaryIPAddresses []string `json:"SecondaryIPAddresses,omitempty" yaml:"SecondaryIPAddresses,omitempty"` - SecondaryIPv6Addresses []string `json:"SecondaryIPv6Addresses,omitempty" yaml:"SecondaryIPv6Addresses,omitempty"` -} - -// PortMappingAPI translates the port mappings as contained in NetworkSettings -// into the format in which they would appear when returned by the API -func (settings *NetworkSettings) PortMappingAPI() []APIPort { - var mapping []APIPort - for port, bindings := range settings.Ports { - p, _ := parsePort(port.Port()) - if len(bindings) == 0 { - mapping = append(mapping, APIPort{ - PublicPort: int64(p), - Type: port.Proto(), - }) - continue - } - for _, binding := range bindings { - p, _ := parsePort(port.Port()) - h, _ := parsePort(binding.HostPort) - mapping = 
append(mapping, APIPort{ - PrivatePort: int64(p), - PublicPort: int64(h), - Type: port.Proto(), - IP: binding.HostIP, - }) - } - } - return mapping -} - -func parsePort(rawPort string) (int, error) { - port, err := strconv.ParseUint(rawPort, 10, 16) - if err != nil { - return 0, err - } - return int(port), nil -} - -// Config is the list of configuration options used when creating a container. -// Config does not contain the options that are specific to starting a container on a -// given host. Those are contained in HostConfig -type Config struct { - Hostname string `json:"Hostname,omitempty" yaml:"Hostname,omitempty"` - Domainname string `json:"Domainname,omitempty" yaml:"Domainname,omitempty"` - User string `json:"User,omitempty" yaml:"User,omitempty"` - Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"` - MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"` - CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"` - CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"` - AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"` - AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"` - AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"` - PortSpecs []string `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty"` - ExposedPorts map[Port]struct{} `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty"` - Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"` - OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"` - StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty"` - Env []string `json:"Env,omitempty" yaml:"Env,omitempty"` - Cmd []string `json:"Cmd" yaml:"Cmd"` - DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.9 and below only - Image string `json:"Image,omitempty" yaml:"Image,omitempty"` - Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty"` - VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"` - VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"` - WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty"` - MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"` - Entrypoint []string `json:"Entrypoint" yaml:"Entrypoint"` - NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"` - SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty"` - OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty"` - Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"` - Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"` -} - -// Mount represents a mount point in the container. -// -// It has been added in the version 1.20 of the Docker API, available since -// Docker 1.8. -type Mount struct { - Source string - Destination string - Mode string - RW bool -} - -// LogConfig defines the log driver type and the configuration for it. -type LogConfig struct { - Type string `json:"Type,omitempty" yaml:"Type,omitempty"` - Config map[string]string `json:"Config,omitempty" yaml:"Config,omitempty"` -} - -// ULimit defines system-wide resource limitations -// This can help a lot in system administration, e.g. when a user starts too many processes and therefore makes the system unresponsive for other users. 
-type ULimit struct { - Name string `json:"Name,omitempty" yaml:"Name,omitempty"` - Soft int64 `json:"Soft,omitempty" yaml:"Soft,omitempty"` - Hard int64 `json:"Hard,omitempty" yaml:"Hard,omitempty"` -} - -// SwarmNode contains information about which Swarm node the container is on -type SwarmNode struct { - ID string `json:"ID,omitempty" yaml:"ID,omitempty"` - IP string `json:"IP,omitempty" yaml:"IP,omitempty"` - Addr string `json:"Addr,omitempty" yaml:"Addr,omitempty"` - Name string `json:"Name,omitempty" yaml:"Name,omitempty"` - CPUs int64 `json:"CPUs,omitempty" yaml:"CPUs,omitempty"` - Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"` - Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"` -} - -// Container is the type encompassing everything about a container - its config, -// hostconfig, etc. -type Container struct { - ID string `json:"Id" yaml:"Id"` - - Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty"` - - Path string `json:"Path,omitempty" yaml:"Path,omitempty"` - Args []string `json:"Args,omitempty" yaml:"Args,omitempty"` - - Config *Config `json:"Config,omitempty" yaml:"Config,omitempty"` - State State `json:"State,omitempty" yaml:"State,omitempty"` - Image string `json:"Image,omitempty" yaml:"Image,omitempty"` - - Node *SwarmNode `json:"Node,omitempty" yaml:"Node,omitempty"` - - NetworkSettings *NetworkSettings `json:"NetworkSettings,omitempty" yaml:"NetworkSettings,omitempty"` - - SysInitPath string `json:"SysInitPath,omitempty" yaml:"SysInitPath,omitempty"` - ResolvConfPath string `json:"ResolvConfPath,omitempty" yaml:"ResolvConfPath,omitempty"` - HostnamePath string `json:"HostnamePath,omitempty" yaml:"HostnamePath,omitempty"` - HostsPath string `json:"HostsPath,omitempty" yaml:"HostsPath,omitempty"` - LogPath string `json:"LogPath,omitempty" yaml:"LogPath,omitempty"` - Name string `json:"Name,omitempty" yaml:"Name,omitempty"` - Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty"` - Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"` - - Volumes map[string]string `json:"Volumes,omitempty" yaml:"Volumes,omitempty"` - VolumesRW map[string]bool `json:"VolumesRW,omitempty" yaml:"VolumesRW,omitempty"` - HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"` - ExecIDs []string `json:"ExecIDs,omitempty" yaml:"ExecIDs,omitempty"` - - RestartCount int `json:"RestartCount,omitempty" yaml:"RestartCount,omitempty"` - - AppArmorProfile string `json:"AppArmorProfile,omitempty" yaml:"AppArmorProfile,omitempty"` -} - -// RenameContainerOptions specify parameters to the RenameContainer function. -// -// See https://goo.gl/laSOIy for more details. -type RenameContainerOptions struct { - // ID of container to rename - ID string `qs:"-"` - - // New name - Name string `json:"name,omitempty" yaml:"name,omitempty"` -} - -// RenameContainer renames an existing container. -// -// See https://goo.gl/laSOIy for more details. -func (c *Client) RenameContainer(opts RenameContainerOptions) error { - resp, err := c.do("POST", fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{}) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// InspectContainer returns information about a container by its ID. -// -// See https://goo.gl/RdIq0b for more details.
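A sketch combining ListContainers and RenameContainer (client construction as in the earlier sketches; the container ID and new name are hypothetical):

	containers, err := client.ListContainers(docker.ListContainersOptions{All: true})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID, c.Image, c.Status)
	}
	err = client.RenameContainer(docker.RenameContainerOptions{ID: "abc123", Name: "web-1"})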
-func (c *Client) InspectContainer(id string) (*Container, error) { - path := "/containers/" + id + "/json" - resp, err := c.do("GET", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchContainer{ID: id} - } - return nil, err - } - defer resp.Body.Close() - var container Container - if err := json.NewDecoder(resp.Body).Decode(&container); err != nil { - return nil, err - } - return &container, nil -} - -// ContainerChanges returns changes in the filesystem of the given container. -// -// See https://goo.gl/9GsTIF for more details. -func (c *Client) ContainerChanges(id string) ([]Change, error) { - path := "/containers/" + id + "/changes" - resp, err := c.do("GET", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchContainer{ID: id} - } - return nil, err - } - defer resp.Body.Close() - var changes []Change - if err := json.NewDecoder(resp.Body).Decode(&changes); err != nil { - return nil, err - } - return changes, nil -} - -// CreateContainerOptions specify parameters to the CreateContainer function. -// -// See https://goo.gl/WxQzrr for more details. -type CreateContainerOptions struct { - Name string - Config *Config `qs:"-"` - HostConfig *HostConfig `qs:"-"` -} - -// CreateContainer creates a new container, returning the container instance, -// or an error in case of failure. -// -// See https://goo.gl/WxQzrr for more details. -func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error) { - path := "/containers/create?" + queryString(opts) - resp, err := c.do( - "POST", - path, - doOptions{ - data: struct { - *Config - HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"` - }{ - opts.Config, - opts.HostConfig, - }, - }, - ) - - if e, ok := err.(*Error); ok { - if e.Status == http.StatusNotFound { - return nil, ErrNoSuchImage - } - if e.Status == http.StatusConflict { - return nil, ErrContainerAlreadyExists - } - } - - if err != nil { - return nil, err - } - defer resp.Body.Close() - var container Container - if err := json.NewDecoder(resp.Body).Decode(&container); err != nil { - return nil, err - } - - container.Name = opts.Name - - return &container, nil -} - -// KeyValuePair is a type for generic key/value pairs as used in the Lxc -// configuration -type KeyValuePair struct { - Key string `json:"Key,omitempty" yaml:"Key,omitempty"` - Value string `json:"Value,omitempty" yaml:"Value,omitempty"` -} - -// RestartPolicy represents the policy for automatically restarting a container. -// -// Possible values are: -// -// - always: the docker daemon will always restart the container -// - on-failure: the docker daemon will restart the container on failures, at -// most MaximumRetryCount times -// - no: the docker daemon will not restart the container automatically -type RestartPolicy struct { - Name string `json:"Name,omitempty" yaml:"Name,omitempty"` - MaximumRetryCount int `json:"MaximumRetryCount,omitempty" yaml:"MaximumRetryCount,omitempty"` -} - -// AlwaysRestart returns a restart policy that tells the Docker daemon to -// always restart the container. -func AlwaysRestart() RestartPolicy { - return RestartPolicy{Name: "always"} -} - -// RestartOnFailure returns a restart policy that tells the Docker daemon to -// restart the container on failures, trying at most maxRetry times. 
-func RestartOnFailure(maxRetry int) RestartPolicy { - return RestartPolicy{Name: "on-failure", MaximumRetryCount: maxRetry} -} - -// NeverRestart returns a restart policy that tells the Docker daemon to never -// restart the container on failures. -func NeverRestart() RestartPolicy { - return RestartPolicy{Name: "no"} -} - -// Device represents a device mapping between the Docker host and the -// container. -type Device struct { - PathOnHost string `json:"PathOnHost,omitempty" yaml:"PathOnHost,omitempty"` - PathInContainer string `json:"PathInContainer,omitempty" yaml:"PathInContainer,omitempty"` - CgroupPermissions string `json:"CgroupPermissions,omitempty" yaml:"CgroupPermissions,omitempty"` -} - -// HostConfig contains the container options related to starting a container on -// a given host -type HostConfig struct { - Binds []string `json:"Binds,omitempty" yaml:"Binds,omitempty"` - CapAdd []string `json:"CapAdd,omitempty" yaml:"CapAdd,omitempty"` - CapDrop []string `json:"CapDrop,omitempty" yaml:"CapDrop,omitempty"` - GroupAdd []string `json:"GroupAdd,omitempty" yaml:"GroupAdd,omitempty"` - ContainerIDFile string `json:"ContainerIDFile,omitempty" yaml:"ContainerIDFile,omitempty"` - LxcConf []KeyValuePair `json:"LxcConf,omitempty" yaml:"LxcConf,omitempty"` - Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty"` - PortBindings map[Port][]PortBinding `json:"PortBindings,omitempty" yaml:"PortBindings,omitempty"` - Links []string `json:"Links,omitempty" yaml:"Links,omitempty"` - PublishAllPorts bool `json:"PublishAllPorts,omitempty" yaml:"PublishAllPorts,omitempty"` - DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.10 and above only - DNSSearch []string `json:"DnsSearch,omitempty" yaml:"DnsSearch,omitempty"` - ExtraHosts []string `json:"ExtraHosts,omitempty" yaml:"ExtraHosts,omitempty"` - VolumesFrom []string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"` - NetworkMode string `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty"` - IpcMode string `json:"IpcMode,omitempty" yaml:"IpcMode,omitempty"` - PidMode string `json:"PidMode,omitempty" yaml:"PidMode,omitempty"` - UTSMode string `json:"UTSMode,omitempty" yaml:"UTSMode,omitempty"` - RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty"` - Devices []Device `json:"Devices,omitempty" yaml:"Devices,omitempty"` - LogConfig LogConfig `json:"LogConfig,omitempty" yaml:"LogConfig,omitempty"` - ReadonlyRootfs bool `json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty"` - SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty"` - CgroupParent string `json:"CgroupParent,omitempty" yaml:"CgroupParent,omitempty"` - Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"` - MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"` - MemorySwappiness int64 `json:"MemorySwappiness,omitempty" yaml:"MemorySwappiness,omitempty"` - OOMKillDisable bool `json:"OomKillDisable,omitempty" yaml:"OomKillDisable"` - CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"` - CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"` - CPUSetCPUs string `json:"CpusetCpus,omitempty" yaml:"CpusetCpus,omitempty"` - CPUSetMEMs string `json:"CpusetMems,omitempty" yaml:"CpusetMems,omitempty"` - CPUQuota int64 `json:"CpuQuota,omitempty" yaml:"CpuQuota,omitempty"` - CPUPeriod int64 `json:"CpuPeriod,omitempty" yaml:"CpuPeriod,omitempty"` - BlkioWeight int64 `json:"BlkioWeight,omitempty" 
yaml:"BlkioWeight"` - Ulimits []ULimit `json:"Ulimits,omitempty" yaml:"Ulimits,omitempty"` -} - -// StartContainer starts a container, returning an error in case of failure. -// -// See https://goo.gl/MrBAJv for more details. -func (c *Client) StartContainer(id string, hostConfig *HostConfig) error { - path := "/containers/" + id + "/start" - resp, err := c.do("POST", path, doOptions{data: hostConfig, forceJSON: true}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: id, Err: err} - } - return err - } - if resp.StatusCode == http.StatusNotModified { - return &ContainerAlreadyRunning{ID: id} - } - resp.Body.Close() - return nil -} - -// StopContainer stops a container, killing it after the given timeout (in -// seconds). -// -// See https://goo.gl/USqsFt for more details. -func (c *Client) StopContainer(id string, timeout uint) error { - path := fmt.Sprintf("/containers/%s/stop?t=%d", id, timeout) - resp, err := c.do("POST", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: id} - } - return err - } - if resp.StatusCode == http.StatusNotModified { - return &ContainerNotRunning{ID: id} - } - resp.Body.Close() - return nil -} - -// RestartContainer stops a container, killing it after the given timeout (in -// seconds), during the stop process. -// -// See https://goo.gl/QzsDnz for more details. -func (c *Client) RestartContainer(id string, timeout uint) error { - path := fmt.Sprintf("/containers/%s/restart?t=%d", id, timeout) - resp, err := c.do("POST", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: id} - } - return err - } - resp.Body.Close() - return nil -} - -// PauseContainer pauses the given container. -// -// See https://goo.gl/OF7W9X for more details. -func (c *Client) PauseContainer(id string) error { - path := fmt.Sprintf("/containers/%s/pause", id) - resp, err := c.do("POST", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: id} - } - return err - } - resp.Body.Close() - return nil -} - -// UnpauseContainer unpauses the given container. -// -// See https://goo.gl/7dwyPA for more details. -func (c *Client) UnpauseContainer(id string) error { - path := fmt.Sprintf("/containers/%s/unpause", id) - resp, err := c.do("POST", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: id} - } - return err - } - resp.Body.Close() - return nil -} - -// TopResult represents the list of processes running in a container, as -// returned by /containers//top. -// -// See https://goo.gl/Rb46aY for more details. -type TopResult struct { - Titles []string - Processes [][]string -} - -// TopContainer returns processes running inside a container -// -// See https://goo.gl/Rb46aY for more details. 
-func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) { - var args string - var result TopResult - if psArgs != "" { - args = fmt.Sprintf("?ps_args=%s", psArgs) - } - path := fmt.Sprintf("/containers/%s/top%s", id, args) - resp, err := c.do("GET", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return result, &NoSuchContainer{ID: id} - } - return result, err - } - defer resp.Body.Close() - if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { - return result, err - } - return result, nil -} - -// Stats represents container statistics, returned by /containers//stats. -// -// See https://goo.gl/GNmLHb for more details. -type Stats struct { - Read time.Time `json:"read,omitempty" yaml:"read,omitempty"` - Network struct { - RxDropped uint64 `json:"rx_dropped,omitempty" yaml:"rx_dropped,omitempty"` - RxBytes uint64 `json:"rx_bytes,omitempty" yaml:"rx_bytes,omitempty"` - RxErrors uint64 `json:"rx_errors,omitempty" yaml:"rx_errors,omitempty"` - TxPackets uint64 `json:"tx_packets,omitempty" yaml:"tx_packets,omitempty"` - TxDropped uint64 `json:"tx_dropped,omitempty" yaml:"tx_dropped,omitempty"` - RxPackets uint64 `json:"rx_packets,omitempty" yaml:"rx_packets,omitempty"` - TxErrors uint64 `json:"tx_errors,omitempty" yaml:"tx_errors,omitempty"` - TxBytes uint64 `json:"tx_bytes,omitempty" yaml:"tx_bytes,omitempty"` - } `json:"network,omitempty" yaml:"network,omitempty"` - MemoryStats struct { - Stats struct { - TotalPgmafault uint64 `json:"total_pgmafault,omitempty" yaml:"total_pgmafault,omitempty"` - Cache uint64 `json:"cache,omitempty" yaml:"cache,omitempty"` - MappedFile uint64 `json:"mapped_file,omitempty" yaml:"mapped_file,omitempty"` - TotalInactiveFile uint64 `json:"total_inactive_file,omitempty" yaml:"total_inactive_file,omitempty"` - Pgpgout uint64 `json:"pgpgout,omitempty" yaml:"pgpgout,omitempty"` - Rss uint64 `json:"rss,omitempty" yaml:"rss,omitempty"` - TotalMappedFile uint64 `json:"total_mapped_file,omitempty" yaml:"total_mapped_file,omitempty"` - Writeback uint64 `json:"writeback,omitempty" yaml:"writeback,omitempty"` - Unevictable uint64 `json:"unevictable,omitempty" yaml:"unevictable,omitempty"` - Pgpgin uint64 `json:"pgpgin,omitempty" yaml:"pgpgin,omitempty"` - TotalUnevictable uint64 `json:"total_unevictable,omitempty" yaml:"total_unevictable,omitempty"` - Pgmajfault uint64 `json:"pgmajfault,omitempty" yaml:"pgmajfault,omitempty"` - TotalRss uint64 `json:"total_rss,omitempty" yaml:"total_rss,omitempty"` - TotalRssHuge uint64 `json:"total_rss_huge,omitempty" yaml:"total_rss_huge,omitempty"` - TotalWriteback uint64 `json:"total_writeback,omitempty" yaml:"total_writeback,omitempty"` - TotalInactiveAnon uint64 `json:"total_inactive_anon,omitempty" yaml:"total_inactive_anon,omitempty"` - RssHuge uint64 `json:"rss_huge,omitempty" yaml:"rss_huge,omitempty"` - HierarchicalMemoryLimit uint64 `json:"hierarchical_memory_limit,omitempty" yaml:"hierarchical_memory_limit,omitempty"` - TotalPgfault uint64 `json:"total_pgfault,omitempty" yaml:"total_pgfault,omitempty"` - TotalActiveFile uint64 `json:"total_active_file,omitempty" yaml:"total_active_file,omitempty"` - ActiveAnon uint64 `json:"active_anon,omitempty" yaml:"active_anon,omitempty"` - TotalActiveAnon uint64 `json:"total_active_anon,omitempty" yaml:"total_active_anon,omitempty"` - TotalPgpgout uint64 `json:"total_pgpgout,omitempty" yaml:"total_pgpgout,omitempty"` - TotalCache uint64 `json:"total_cache,omitempty" yaml:"total_cache,omitempty"` - 
InactiveAnon uint64 `json:"inactive_anon,omitempty" yaml:"inactive_anon,omitempty"` - ActiveFile uint64 `json:"active_file,omitempty" yaml:"active_file,omitempty"` - Pgfault uint64 `json:"pgfault,omitempty" yaml:"pgfault,omitempty"` - InactiveFile uint64 `json:"inactive_file,omitempty" yaml:"inactive_file,omitempty"` - TotalPgpgin uint64 `json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty"` - } `json:"stats,omitempty" yaml:"stats,omitempty"` - MaxUsage uint64 `json:"max_usage,omitempty" yaml:"max_usage,omitempty"` - Usage uint64 `json:"usage,omitempty" yaml:"usage,omitempty"` - Failcnt uint64 `json:"failcnt,omitempty" yaml:"failcnt,omitempty"` - Limit uint64 `json:"limit,omitempty" yaml:"limit,omitempty"` - } `json:"memory_stats,omitempty" yaml:"memory_stats,omitempty"` - BlkioStats struct { - IOServiceBytesRecursive []BlkioStatsEntry `json:"io_service_bytes_recursive,omitempty" yaml:"io_service_bytes_recursive,omitempty"` - IOServicedRecursive []BlkioStatsEntry `json:"io_serviced_recursive,omitempty" yaml:"io_serviced_recursive,omitempty"` - IOQueueRecursive []BlkioStatsEntry `json:"io_queue_recursive,omitempty" yaml:"io_queue_recursive,omitempty"` - IOServiceTimeRecursive []BlkioStatsEntry `json:"io_service_time_recursive,omitempty" yaml:"io_service_time_recursive,omitempty"` - IOWaitTimeRecursive []BlkioStatsEntry `json:"io_wait_time_recursive,omitempty" yaml:"io_wait_time_recursive,omitempty"` - IOMergedRecursive []BlkioStatsEntry `json:"io_merged_recursive,omitempty" yaml:"io_merged_recursive,omitempty"` - IOTimeRecursive []BlkioStatsEntry `json:"io_time_recursive,omitempty" yaml:"io_time_recursive,omitempty"` - SectorsRecursive []BlkioStatsEntry `json:"sectors_recursive,omitempty" yaml:"sectors_recursive,omitempty"` - } `json:"blkio_stats,omitempty" yaml:"blkio_stats,omitempty"` - CPUStats CPUStats `json:"cpu_stats,omitempty" yaml:"cpu_stats,omitempty"` - PreCPUStats CPUStats `json:"precpu_stats,omitempty"` -} - -// CPUStats is a stats entry for cpu stats -type CPUStats struct { - CPUUsage struct { - PercpuUsage []uint64 `json:"percpu_usage,omitempty" yaml:"percpu_usage,omitempty"` - UsageInUsermode uint64 `json:"usage_in_usermode,omitempty" yaml:"usage_in_usermode,omitempty"` - TotalUsage uint64 `json:"total_usage,omitempty" yaml:"total_usage,omitempty"` - UsageInKernelmode uint64 `json:"usage_in_kernelmode,omitempty" yaml:"usage_in_kernelmode,omitempty"` - } `json:"cpu_usage,omitempty" yaml:"cpu_usage,omitempty"` - SystemCPUUsage uint64 `json:"system_cpu_usage,omitempty" yaml:"system_cpu_usage,omitempty"` - ThrottlingData struct { - Periods uint64 `json:"periods,omitempty"` - ThrottledPeriods uint64 `json:"throttled_periods,omitempty"` - ThrottledTime uint64 `json:"throttled_time,omitempty"` - } `json:"throttling_data,omitempty" yaml:"throttling_data,omitempty"` -} - -// BlkioStatsEntry is a stats entry for blkio_stats -type BlkioStatsEntry struct { - Major uint64 `json:"major,omitempty" yaml:"major,omitempty"` - Minor uint64 `json:"minor,omitempty" yaml:"minor,omitempty"` - Op string `json:"op,omitempty" yaml:"op,omitempty"` - Value uint64 `json:"value,omitempty" yaml:"value,omitempty"` -} - -// StatsOptions specify parameters to the Stats function. -// -// See https://goo.gl/GNmLHb for more details. 
-type StatsOptions struct { - ID string - Stats chan<- *Stats - Stream bool - // A flag that enables stopping the stats operation - Done <-chan bool - // Initial connection timeout - Timeout time.Duration -} - -// Stats sends container statistics for the given container to the given channel. -// -// This function is blocking, similar to a streaming call for logs, and should be run -// on a separate goroutine from the caller. Note that this function will block until -// the given container is removed, not just exited. When finished, this function -// will close the given channel. Alternatively, function can be stopped by -// signaling on the Done channel. -// -// See https://goo.gl/GNmLHb for more details. -func (c *Client) Stats(opts StatsOptions) (retErr error) { - errC := make(chan error, 1) - readCloser, writeCloser := io.Pipe() - - defer func() { - close(opts.Stats) - - select { - case err := <-errC: - if err != nil && retErr == nil { - retErr = err - } - default: - // No errors - } - - if err := readCloser.Close(); err != nil && retErr == nil { - retErr = err - } - }() - - go func() { - err := c.stream("GET", fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{ - rawJSONStream: true, - useJSONDecoder: true, - stdout: writeCloser, - timeout: opts.Timeout, - }) - if err != nil { - dockerError, ok := err.(*Error) - if ok { - if dockerError.Status == http.StatusNotFound { - err = &NoSuchContainer{ID: opts.ID} - } - } - } - if closeErr := writeCloser.Close(); closeErr != nil && err == nil { - err = closeErr - } - errC <- err - close(errC) - }() - - quit := make(chan struct{}) - defer close(quit) - go func() { - // block here waiting for the signal to stop function - select { - case <-opts.Done: - readCloser.Close() - case <-quit: - return - } - }() - - decoder := json.NewDecoder(readCloser) - stats := new(Stats) - for err := decoder.Decode(stats); err != io.EOF; err = decoder.Decode(stats) { - if err != nil { - return err - } - opts.Stats <- stats - stats = new(Stats) - } - return nil -} - -// KillContainerOptions represents the set of options that can be used in a -// call to KillContainer. -// -// See https://goo.gl/hkS9i8 for more details. -type KillContainerOptions struct { - // The ID of the container. - ID string `qs:"-"` - - // The signal to send to the container. When omitted, Docker server - // will assume SIGKILL. - Signal Signal -} - -// KillContainer sends a signal to a container, returning an error in case of -// failure. -// -// See https://goo.gl/hkS9i8 for more details. -func (c *Client) KillContainer(opts KillContainerOptions) error { - path := "/containers/" + opts.ID + "/kill" + "?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: opts.ID} - } - return err - } - resp.Body.Close() - return nil -} - -// RemoveContainerOptions encapsulates options to remove a container. -// -// See https://goo.gl/RQyX62 for more details. -type RemoveContainerOptions struct { - // The ID of the container. - ID string `qs:"-"` - - // A flag that indicates whether Docker should remove the volumes - // associated to the container. - RemoveVolumes bool `qs:"v"` - - // A flag that indicates whether Docker should remove the container - // even if it is currently running. - Force bool -} - -// RemoveContainer removes a container, returning an error in case of failure. -// -// See https://goo.gl/RQyX62 for more details. 
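A sketch of streaming Stats as documented above - blocking, so it runs in its own goroutine, and the channel is closed by Stats when it finishes (the container ID is hypothetical):

	statsC := make(chan *docker.Stats)
	done := make(chan bool)
	go func() {
		err := client.Stats(docker.StatsOptions{
			ID:     "abc123",
			Stats:  statsC,
			Stream: true,
			Done:   done,
		})
		if err != nil {
			log.Println(err)
		}
	}()
	for s := range statsC {
		fmt.Println(s.Read, s.MemoryStats.Usage)
	}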
-func (c *Client) RemoveContainer(opts RemoveContainerOptions) error { - path := "/containers/" + opts.ID + "?" + queryString(opts) - resp, err := c.do("DELETE", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: opts.ID} - } - return err - } - resp.Body.Close() - return nil -} - -// UploadToContainerOptions is the set of options that can be used when -// uploading an archive into a container. -// -// See https://goo.gl/Ss97HW for more details. -type UploadToContainerOptions struct { - InputStream io.Reader `json:"-" qs:"-"` - Path string `qs:"path"` - NoOverwriteDirNonDir bool `qs:"noOverwriteDirNonDir"` -} - -// UploadToContainer uploads a tar archive to be extracted to a path in the -// filesystem of the container. -// -// See https://goo.gl/Ss97HW for more details. -func (c *Client) UploadToContainer(id string, opts UploadToContainerOptions) error { - url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts) - - return c.stream("PUT", url, streamOptions{ - in: opts.InputStream, - }) -} - -// DownloadFromContainerOptions is the set of options that can be used when -// downloading resources from a container. -// -// See https://goo.gl/KnZJDX for more details. -type DownloadFromContainerOptions struct { - OutputStream io.Writer `json:"-" qs:"-"` - Path string `qs:"path"` -} - -// DownloadFromContainer downloads a tar archive of files or folders in a container. -// -// See https://goo.gl/KnZJDX for more details. -func (c *Client) DownloadFromContainer(id string, opts DownloadFromContainerOptions) error { - url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts) - - return c.stream("GET", url, streamOptions{ - setRawTerminal: true, - stdout: opts.OutputStream, - }) -} - -// CopyFromContainerOptions has been DEPRECATED, please use DownloadFromContainerOptions along with DownloadFromContainer. -// -// See https://goo.gl/R2jevW for more details. -type CopyFromContainerOptions struct { - OutputStream io.Writer `json:"-"` - Container string `json:"-"` - Resource string -} - -// CopyFromContainer has been DEPRECATED, please use DownloadFromContainerOptions along with DownloadFromContainer. -// -// See https://goo.gl/R2jevW for more details. -func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error { - if opts.Container == "" { - return &NoSuchContainer{ID: opts.Container} - } - url := fmt.Sprintf("/containers/%s/copy", opts.Container) - resp, err := c.do("POST", url, doOptions{data: opts}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchContainer{ID: opts.Container} - } - return err - } - defer resp.Body.Close() - _, err = io.Copy(opts.OutputStream, resp.Body) - return err -} - -// WaitContainer blocks until the given container stops, returning the exit code -// of the container status. -// -// See https://goo.gl/Gc1rge for more details. -func (c *Client) WaitContainer(id string) (int, error) { - resp, err := c.do("POST", "/containers/"+id+"/wait", doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return 0, &NoSuchContainer{ID: id} - } - return 0, err - } - defer resp.Body.Close() - var r struct{ StatusCode int } - if err := json.NewDecoder(resp.Body).Decode(&r); err != nil { - return 0, err - } - return r.StatusCode, nil -} - -// CommitContainerOptions aggregates parameters to the CommitContainer method. -// -// See https://goo.gl/mqfoCw for more details.
-type CommitContainerOptions struct { - Container string - Repository string `qs:"repo"` - Tag string - Message string `qs:"m"` - Author string - Run *Config `qs:"-"` -} - -// CommitContainer creates a new image from a container's changes. -// -// See https://goo.gl/mqfoCw for more details. -func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) { - path := "/commit?" + queryString(opts) - resp, err := c.do("POST", path, doOptions{data: opts.Run}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchContainer{ID: opts.Container} - } - return nil, err - } - defer resp.Body.Close() - var image Image - if err := json.NewDecoder(resp.Body).Decode(&image); err != nil { - return nil, err - } - return &image, nil -} - -// AttachToContainerOptions is the set of options that can be used when -// attaching to a container. -// -// See https://goo.gl/NKpkFk for more details. -type AttachToContainerOptions struct { - Container string `qs:"-"` - InputStream io.Reader `qs:"-"` - OutputStream io.Writer `qs:"-"` - ErrorStream io.Writer `qs:"-"` - - // Get container logs, sending it to OutputStream. - Logs bool - - // Stream the response? - Stream bool - - // Attach to stdin, and use InputStream. - Stdin bool - - // Attach to stdout, and use OutputStream. - Stdout bool - - // Attach to stderr, and use ErrorStream. - Stderr bool - - // If set, after a successful connect, a sentinel will be sent and then the - // client will block on receive before continuing. - // - // It must be an unbuffered channel. Using a buffered channel can lead - // to unexpected behavior. - Success chan struct{} - - // Use raw terminal? Usually true when the container contains a TTY. - RawTerminal bool `qs:"-"` -} - -// AttachToContainer attaches to a container, using the given options. -// -// See https://goo.gl/NKpkFk for more details. -func (c *Client) AttachToContainer(opts AttachToContainerOptions) error { - if opts.Container == "" { - return &NoSuchContainer{ID: opts.Container} - } - path := "/containers/" + opts.Container + "/attach?" + queryString(opts) - return c.hijack("POST", path, hijackOptions{ - success: opts.Success, - setRawTerminal: opts.RawTerminal, - in: opts.InputStream, - stdout: opts.OutputStream, - stderr: opts.ErrorStream, - }) -} - -// LogsOptions represents the set of options used when getting logs from a -// container. -// -// See https://goo.gl/yl8PGm for more details. -type LogsOptions struct { - Container string `qs:"-"` - OutputStream io.Writer `qs:"-"` - ErrorStream io.Writer `qs:"-"` - Follow bool - Stdout bool - Stderr bool - Since int64 - Timestamps bool - Tail string - - // Use raw terminal? Usually true when the container contains a TTY. - RawTerminal bool `qs:"-"` -} - -// Logs gets stdout and stderr logs from the specified container. -// -// See https://goo.gl/yl8PGm for more details. -func (c *Client) Logs(opts LogsOptions) error { - if opts.Container == "" { - return &NoSuchContainer{ID: opts.Container} - } - if opts.Tail == "" { - opts.Tail = "all" - } - path := "/containers/" + opts.Container + "/logs?" + queryString(opts) - return c.stream("GET", path, streamOptions{ - setRawTerminal: opts.RawTerminal, - stdout: opts.OutputStream, - stderr: opts.ErrorStream, - }) -} - -// ResizeContainerTTY resizes the terminal to the given height and width. -// -// See https://goo.gl/xERhCc for more details. 
-func (c *Client) ResizeContainerTTY(id string, height, width int) error {
-	params := make(url.Values)
-	params.Set("h", strconv.Itoa(height))
-	params.Set("w", strconv.Itoa(width))
-	resp, err := c.do("POST", "/containers/"+id+"/resize?"+params.Encode(), doOptions{})
-	if err != nil {
-		return err
-	}
-	resp.Body.Close()
-	return nil
-}
-
-// ExportContainerOptions is the set of parameters to the ExportContainer
-// method.
-//
-// See https://goo.gl/dOkTyk for more details.
-type ExportContainerOptions struct {
-	ID string
-	OutputStream io.Writer
-}
-
-// ExportContainer exports the contents of container id as a tar archive,
-// writing it to the given output stream.
-//
-// See https://goo.gl/dOkTyk for more details.
-func (c *Client) ExportContainer(opts ExportContainerOptions) error {
-	if opts.ID == "" {
-		return &NoSuchContainer{ID: opts.ID}
-	}
-	url := fmt.Sprintf("/containers/%s/export", opts.ID)
-	return c.stream("GET", url, streamOptions{
-		setRawTerminal: true,
-		stdout: opts.OutputStream,
-	})
-}
-
-// NoSuchContainer is the error returned when a given container does not exist.
-type NoSuchContainer struct {
-	ID string
-	Err error
-}
-
-func (err *NoSuchContainer) Error() string {
-	if err.Err != nil {
-		return err.Err.Error()
-	}
-	return "No such container: " + err.ID
-}
-
-// ContainerAlreadyRunning is the error returned when a given container is
-// already running.
-type ContainerAlreadyRunning struct {
-	ID string
-}
-
-func (err *ContainerAlreadyRunning) Error() string {
-	return "Container already running: " + err.ID
-}
-
-// ContainerNotRunning is the error returned when a given container is not
-// running.
-type ContainerNotRunning struct {
-	ID string
-}
-
-func (err *ContainerNotRunning) Error() string {
-	return "Container not running: " + err.ID
-}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.go
deleted file mode 100644
index c54b0b0e8..000000000
--- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2014 Docker authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the DOCKER-LICENSE file.
-
-package docker
-
-import (
-	"encoding/json"
-	"fmt"
-	"io"
-	"strconv"
-	"strings"
-)
-
-// Env represents a list of key-value pairs in the form KEY=VALUE.
-type Env []string
-
-// Get returns the string value of the given key.
-func (env *Env) Get(key string) (value string) {
-	return env.Map()[key]
-}
-
-// Exists checks whether the given key is defined in the internal Env
-// representation.
-func (env *Env) Exists(key string) bool {
-	_, exists := env.Map()[key]
-	return exists
-}
-
-// GetBool returns a boolean representation of the given key. The key is false
-// whenever its value is 0, no, false, none or an empty string. Any other value
-// will be interpreted as true.
-func (env *Env) GetBool(key string) (value bool) {
-	s := strings.ToLower(strings.Trim(env.Get(key), " \t"))
-	if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
-		return false
-	}
-	return true
-}
-
-// SetBool assigns a boolean value to the given key.
-func (env *Env) SetBool(key string, value bool) {
-	if value {
-		env.Set(key, "1")
-	} else {
-		env.Set(key, "0")
-	}
-}
-
-// GetInt returns the value of the provided key, converted to int.
-//
-// If the value cannot be represented as an integer, it returns -1.
-func (env *Env) GetInt(key string) int {
-	return int(env.GetInt64(key))
-}
-
-// SetInt assigns an integer value to the given key.
-func (env *Env) SetInt(key string, value int) {
-	env.Set(key, strconv.Itoa(value))
-}
-
-// GetInt64 returns the value of the provided key, converted to int64.
-//
-// If the value cannot be represented as an integer, it returns -1.
-func (env *Env) GetInt64(key string) int64 {
-	s := strings.Trim(env.Get(key), " \t")
-	val, err := strconv.ParseInt(s, 10, 64)
-	if err != nil {
-		return -1
-	}
-	return val
-}
-
-// SetInt64 assigns an integer (64-bit wide) value to the given key.
-func (env *Env) SetInt64(key string, value int64) {
-	env.Set(key, strconv.FormatInt(value, 10))
-}
-
-// GetJSON unmarshals the value of the provided key into the provided iface.
-//
-// iface is a value that can be provided to the json.Unmarshal function.
-func (env *Env) GetJSON(key string, iface interface{}) error {
-	sval := env.Get(key)
-	if sval == "" {
-		return nil
-	}
-	return json.Unmarshal([]byte(sval), iface)
-}
-
-// SetJSON marshals the given value to JSON format and stores it using the
-// provided key.
-func (env *Env) SetJSON(key string, value interface{}) error {
-	sval, err := json.Marshal(value)
-	if err != nil {
-		return err
-	}
-	env.Set(key, string(sval))
-	return nil
-}
-
-// GetList returns a list of strings matching the provided key. It handles the
-// list as a JSON representation of a list of strings.
-//
-// If the given key matches a single string, it will return a list
-// containing only the value that matches the key.
-func (env *Env) GetList(key string) []string {
-	sval := env.Get(key)
-	if sval == "" {
-		return nil
-	}
-	var l []string
-	if err := json.Unmarshal([]byte(sval), &l); err != nil {
-		l = append(l, sval)
-	}
-	return l
-}
-
-// SetList stores the given list in the provided key, after serializing it to
-// JSON format.
-func (env *Env) SetList(key string, value []string) error {
-	return env.SetJSON(key, value)
-}
-
-// Set defines the value of a key to the given string.
-func (env *Env) Set(key, value string) {
-	*env = append(*env, key+"="+value)
-}
-
-// Decode decodes `src` as a json dictionary, and adds each decoded key-value
-// pair to the environment.
-//
-// If `src` cannot be decoded as a json dictionary, an error is returned.
-func (env *Env) Decode(src io.Reader) error {
-	m := make(map[string]interface{})
-	if err := json.NewDecoder(src).Decode(&m); err != nil {
-		return err
-	}
-	for k, v := range m {
-		env.SetAuto(k, v)
-	}
-	return nil
-}
-
-// SetAuto chooses the Set* method to call based on the type of the given value.
-func (env *Env) SetAuto(key string, value interface{}) {
-	if fval, ok := value.(float64); ok {
-		env.SetInt64(key, int64(fval))
-	} else if sval, ok := value.(string); ok {
-		env.Set(key, sval)
-	} else if val, err := json.Marshal(value); err == nil {
-		env.Set(key, string(val))
-	} else {
-		env.Set(key, fmt.Sprintf("%v", value))
-	}
-}
-
-// Map returns the map representation of the env.
-func (env *Env) Map() map[string]string {
-	if len(*env) == 0 {
-		return nil
-	}
-	m := make(map[string]string)
-	for _, kv := range *env {
-		parts := strings.SplitN(kv, "=", 2)
-		m[parts[0]] = parts[1]
-	}
-	return m
-}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go
deleted file mode 100644
index eaffddb82..000000000
--- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go
+++ /dev/null
@@ -1,304 +0,0 @@
-// Copyright 2015 go-dockerclient authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package docker
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"math"
-	"net"
-	"net/http"
-	"net/http/httputil"
-	"sync"
-	"sync/atomic"
-	"time"
-)
-
-// APIEvents represents an event returned by the API.
-type APIEvents struct {
-	Status string `json:"Status,omitempty" yaml:"Status,omitempty"`
-	ID string `json:"ID,omitempty" yaml:"ID,omitempty"`
-	From string `json:"From,omitempty" yaml:"From,omitempty"`
-	Time int64 `json:"Time,omitempty" yaml:"Time,omitempty"`
-}
-
-type eventMonitoringState struct {
-	sync.RWMutex
-	sync.WaitGroup
-	enabled bool
-	lastSeen *int64
-	C chan *APIEvents
-	errC chan error
-	listeners []chan<- *APIEvents
-}
-
-const (
-	maxMonitorConnRetries = 5
-	retryInitialWaitTime = 10.
-)
-
-var (
-	// ErrNoListeners is the error returned when no listeners are available
-	// to receive an event.
-	ErrNoListeners = errors.New("no listeners present to receive event")
-
-	// ErrListenerAlreadyExists is the error returned when the listener already
-	// exists.
-	ErrListenerAlreadyExists = errors.New("listener already exists for docker events")
-
-	// EOFEvent is sent when the event listener receives an EOF error.
-	EOFEvent = &APIEvents{
-		Status: "EOF",
-	}
-)
-
-// AddEventListener adds a new listener to container events in the Docker API.
-//
-// The parameter is a channel through which events will be sent.
-func (c *Client) AddEventListener(listener chan<- *APIEvents) error {
-	var err error
-	if !c.eventMonitor.isEnabled() {
-		err = c.eventMonitor.enableEventMonitoring(c)
-		if err != nil {
-			return err
-		}
-	}
-	err = c.eventMonitor.addListener(listener)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// RemoveEventListener removes a listener from the monitor.
-func (c *Client) RemoveEventListener(listener chan *APIEvents) error { - err := c.eventMonitor.removeListener(listener) - if err != nil { - return err - } - if len(c.eventMonitor.listeners) == 0 { - c.eventMonitor.disableEventMonitoring() - } - return nil -} - -func (eventState *eventMonitoringState) addListener(listener chan<- *APIEvents) error { - eventState.Lock() - defer eventState.Unlock() - if listenerExists(listener, &eventState.listeners) { - return ErrListenerAlreadyExists - } - eventState.Add(1) - eventState.listeners = append(eventState.listeners, listener) - return nil -} - -func (eventState *eventMonitoringState) removeListener(listener chan<- *APIEvents) error { - eventState.Lock() - defer eventState.Unlock() - if listenerExists(listener, &eventState.listeners) { - var newListeners []chan<- *APIEvents - for _, l := range eventState.listeners { - if l != listener { - newListeners = append(newListeners, l) - } - } - eventState.listeners = newListeners - eventState.Add(-1) - } - return nil -} - -func (eventState *eventMonitoringState) closeListeners() { - for _, l := range eventState.listeners { - close(l) - eventState.Add(-1) - } - eventState.listeners = nil -} - -func listenerExists(a chan<- *APIEvents, list *[]chan<- *APIEvents) bool { - for _, b := range *list { - if b == a { - return true - } - } - return false -} - -func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error { - eventState.Lock() - defer eventState.Unlock() - if !eventState.enabled { - eventState.enabled = true - var lastSeenDefault = int64(0) - eventState.lastSeen = &lastSeenDefault - eventState.C = make(chan *APIEvents, 100) - eventState.errC = make(chan error, 1) - go eventState.monitorEvents(c) - } - return nil -} - -func (eventState *eventMonitoringState) disableEventMonitoring() error { - eventState.Lock() - defer eventState.Unlock() - - eventState.closeListeners() - - eventState.Wait() - - if eventState.enabled { - eventState.enabled = false - close(eventState.C) - close(eventState.errC) - } - return nil -} - -func (eventState *eventMonitoringState) monitorEvents(c *Client) { - var err error - for eventState.noListeners() { - time.Sleep(10 * time.Millisecond) - } - if err = eventState.connectWithRetry(c); err != nil { - // terminate if connect failed - eventState.disableEventMonitoring() - return - } - for eventState.isEnabled() { - timeout := time.After(100 * time.Millisecond) - select { - case ev, ok := <-eventState.C: - if !ok { - return - } - if ev == EOFEvent { - eventState.disableEventMonitoring() - return - } - eventState.updateLastSeen(ev) - go eventState.sendEvent(ev) - case err = <-eventState.errC: - if err == ErrNoListeners { - eventState.disableEventMonitoring() - return - } else if err != nil { - defer func() { go eventState.monitorEvents(c) }() - return - } - case <-timeout: - continue - } - } -} - -func (eventState *eventMonitoringState) connectWithRetry(c *Client) error { - var retries int - var err error - for err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC); err != nil && retries < maxMonitorConnRetries; retries++ { - waitTime := int64(retryInitialWaitTime * math.Pow(2, float64(retries))) - time.Sleep(time.Duration(waitTime) * time.Millisecond) - err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC) - } - return err -} - -func (eventState *eventMonitoringState) noListeners() bool { - eventState.RLock() - defer eventState.RUnlock() - return len(eventState.listeners) == 0 -} - -func 
(eventState *eventMonitoringState) isEnabled() bool { - eventState.RLock() - defer eventState.RUnlock() - return eventState.enabled -} - -func (eventState *eventMonitoringState) sendEvent(event *APIEvents) { - eventState.RLock() - defer eventState.RUnlock() - eventState.Add(1) - defer eventState.Done() - if eventState.enabled { - if len(eventState.listeners) == 0 { - eventState.errC <- ErrNoListeners - return - } - - for _, listener := range eventState.listeners { - listener <- event - } - } -} - -func (eventState *eventMonitoringState) updateLastSeen(e *APIEvents) { - eventState.Lock() - defer eventState.Unlock() - if atomic.LoadInt64(eventState.lastSeen) < e.Time { - atomic.StoreInt64(eventState.lastSeen, e.Time) - } -} - -func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan chan error) error { - uri := "/events" - if startTime != 0 { - uri += fmt.Sprintf("?since=%d", startTime) - } - protocol := c.endpointURL.Scheme - address := c.endpointURL.Path - if protocol != "unix" { - protocol = "tcp" - address = c.endpointURL.Host - } - var dial net.Conn - var err error - if c.TLSConfig == nil { - dial, err = c.Dialer.Dial(protocol, address) - } else { - dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig) - } - if err != nil { - return err - } - conn := httputil.NewClientConn(dial, nil) - req, err := http.NewRequest("GET", uri, nil) - if err != nil { - return err - } - res, err := conn.Do(req) - if err != nil { - return err - } - go func(res *http.Response, conn *httputil.ClientConn) { - defer conn.Close() - defer res.Body.Close() - decoder := json.NewDecoder(res.Body) - for { - var event APIEvents - if err = decoder.Decode(&event); err != nil { - if err == io.EOF || err == io.ErrUnexpectedEOF { - if c.eventMonitor.isEnabled() { - // Signal that we're exiting. - eventChan <- EOFEvent - } - break - } - errChan <- err - } - if event.Time == 0 { - continue - } - if !c.eventMonitor.isEnabled() { - return - } - eventChan <- &event - } - }(res, conn) - return nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go deleted file mode 100644 index f3b705fa0..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "strconv" -) - -// Exec is the type representing a `docker exec` instance and containing the -// instance ID -type Exec struct { - ID string `json:"Id,omitempty" yaml:"Id,omitempty"` -} - -// CreateExecOptions specify parameters to the CreateExecContainer function. 
-// -// See https://goo.gl/1KSIb7 for more details -type CreateExecOptions struct { - AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"` - AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"` - AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"` - Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"` - Cmd []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty"` - Container string `json:"Container,omitempty" yaml:"Container,omitempty"` - User string `json:"User,omitempty" yaml:"User,omitempty"` -} - -// CreateExec sets up an exec instance in a running container `id`, returning the exec -// instance, or an error in case of failure. -// -// See https://goo.gl/1KSIb7 for more details -func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) { - path := fmt.Sprintf("/containers/%s/exec", opts.Container) - resp, err := c.do("POST", path, doOptions{data: opts}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchContainer{ID: opts.Container} - } - return nil, err - } - defer resp.Body.Close() - var exec Exec - if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil { - return nil, err - } - - return &exec, nil -} - -// StartExecOptions specify parameters to the StartExecContainer function. -// -// See https://goo.gl/iQCnto for more details -type StartExecOptions struct { - Detach bool `json:"Detach,omitempty" yaml:"Detach,omitempty"` - - Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"` - - InputStream io.Reader `qs:"-"` - OutputStream io.Writer `qs:"-"` - ErrorStream io.Writer `qs:"-"` - - // Use raw terminal? Usually true when the container contains a TTY. - RawTerminal bool `qs:"-"` - - // If set, after a successful connect, a sentinel will be sent and then the - // client will block on receive before continuing. - // - // It must be an unbuffered channel. Using a buffered channel can lead - // to unexpected behavior. - Success chan struct{} `json:"-"` -} - -// StartExec starts a previously set up exec instance id. If opts.Detach is -// true, it returns after starting the exec command. Otherwise, it sets up an -// interactive session with the exec command. -// -// See https://goo.gl/iQCnto for more details -func (c *Client) StartExec(id string, opts StartExecOptions) error { - if id == "" { - return &NoSuchExec{ID: id} - } - - path := fmt.Sprintf("/exec/%s/start", id) - - if opts.Detach { - resp, err := c.do("POST", path, doOptions{data: opts}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchExec{ID: id} - } - return err - } - defer resp.Body.Close() - return nil - } - - return c.hijack("POST", path, hijackOptions{ - success: opts.Success, - setRawTerminal: opts.RawTerminal, - in: opts.InputStream, - stdout: opts.OutputStream, - stderr: opts.ErrorStream, - data: opts, - }) -} - -// ResizeExecTTY resizes the tty session used by the exec command id. This API -// is valid only if Tty was specified as part of creating and starting the exec -// command. 
-//
-// See https://goo.gl/e1JpsA for more details
-func (c *Client) ResizeExecTTY(id string, height, width int) error {
-	params := make(url.Values)
-	params.Set("h", strconv.Itoa(height))
-	params.Set("w", strconv.Itoa(width))
-
-	path := fmt.Sprintf("/exec/%s/resize?%s", id, params.Encode())
-	resp, err := c.do("POST", path, doOptions{})
-	if err != nil {
-		return err
-	}
-	resp.Body.Close()
-	return nil
-}
-
-// ExecProcessConfig is a type describing the command associated with an Exec
-// instance. It's used in the ExecInspect type.
-type ExecProcessConfig struct {
-	Privileged bool `json:"privileged,omitempty" yaml:"privileged,omitempty"`
-	User string `json:"user,omitempty" yaml:"user,omitempty"`
-	Tty bool `json:"tty,omitempty" yaml:"tty,omitempty"`
-	EntryPoint string `json:"entrypoint,omitempty" yaml:"entrypoint,omitempty"`
-	Arguments []string `json:"arguments,omitempty" yaml:"arguments,omitempty"`
-}
-
-// ExecInspect is a type with details about an exec instance, including the
-// exit code if the command has finished running. It's returned by an API
-// call to /exec/(id)/json.
-//
-// See https://goo.gl/gPtX9R for more details
-type ExecInspect struct {
-	ID string `json:"ID,omitempty" yaml:"ID,omitempty"`
-	Running bool `json:"Running,omitempty" yaml:"Running,omitempty"`
-	ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"`
-	OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
-	OpenStderr bool `json:"OpenStderr,omitempty" yaml:"OpenStderr,omitempty"`
-	OpenStdout bool `json:"OpenStdout,omitempty" yaml:"OpenStdout,omitempty"`
-	ProcessConfig ExecProcessConfig `json:"ProcessConfig,omitempty" yaml:"ProcessConfig,omitempty"`
-	Container Container `json:"Container,omitempty" yaml:"Container,omitempty"`
-}
-
-// InspectExec returns low-level information about the exec command id.
-//
-// See https://goo.gl/gPtX9R for more details
-func (c *Client) InspectExec(id string) (*ExecInspect, error) {
-	path := fmt.Sprintf("/exec/%s/json", id)
-	resp, err := c.do("GET", path, doOptions{})
-	if err != nil {
-		if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
-			return nil, &NoSuchExec{ID: id}
-		}
-		return nil, err
-	}
-	defer resp.Body.Close()
-	var exec ExecInspect
-	if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil {
-		return nil, err
-	}
-	return &exec, nil
-}
-
-// NoSuchExec is the error returned when a given exec instance does not exist.
-type NoSuchExec struct { - ID string -} - -func (err *NoSuchExec) Error() string { - return "No such exec instance: " + err.ID -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md deleted file mode 100644 index ecc843272..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md +++ /dev/null @@ -1,55 +0,0 @@ -# 0.9.0 (Unreleased) - -* logrus/text_formatter: don't emit empty msg -* logrus/hooks/airbrake: move out of main repository -* logrus/hooks/sentry: move out of main repository -* logrus/hooks/papertrail: move out of main repository -* logrus/hooks/bugsnag: move out of main repository - -# 0.8.7 - -* logrus/core: fix possible race (#216) -* logrus/doc: small typo fixes and doc improvements - - -# 0.8.6 - -* hooks/raven: allow passing an initialized client - -# 0.8.5 - -* logrus/core: revert #208 - -# 0.8.4 - -* formatter/text: fix data race (#218) - -# 0.8.3 - -* logrus/core: fix entry log level (#208) -* logrus/core: improve performance of text formatter by 40% -* logrus/core: expose `LevelHooks` type -* logrus/core: add support for DragonflyBSD and NetBSD -* formatter/text: print structs more verbosely - -# 0.8.2 - -* logrus: fix more Fatal family functions - -# 0.8.1 - -* logrus: fix not exiting on `Fatalf` and `Fatalln` - -# 0.8.0 - -* logrus: defaults to stderr instead of stdout -* hooks/sentry: add special field for `*http.Request` -* formatter/text: ignore Windows for colors - -# 0.7.3 - -* formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -* formatter/text: Add configuration option for time format (#158) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE deleted file mode 100644 index f090cb42f..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
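Before moving on to the vendored logrus files, it is worth pinning down how the streaming Stats API removed above is meant to be driven, since its contract (run on a dedicated goroutine, stop by signaling Done, channel closed when the call returns) is easy to misuse. Below is a minimal sketch, not part of the removed code, assuming the package's usual docker.NewClient constructor; the endpoint, container ID, and sample count are placeholders:

```go
package main

import (
	"log"
	"time"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	// Placeholder endpoint; any reachable Docker daemon works.
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}

	statsC := make(chan *docker.Stats)
	done := make(chan bool)

	// Stats blocks until the container is removed or Done is signaled,
	// so it runs on its own goroutine; it closes statsC when it returns.
	go func() {
		err := client.Stats(docker.StatsOptions{
			ID:      "example-container", // placeholder ID
			Stats:   statsC,
			Stream:  true,
			Done:    done,
			Timeout: 10 * time.Second,
		})
		if err != nil {
			// Stopping via Done surfaces a closed-pipe error here,
			// which a caller can treat as a clean shutdown.
			log.Println("stats stream ended:", err)
		}
	}()

	// Consume a few samples, then ask the stream to stop.
	for i := 0; i < 3; i++ {
		if _, ok := <-statsC; !ok {
			return
		}
		log.Printf("received stats sample %d", i)
	}
	close(done)

	// Drain until Stats closes the channel so its sender never blocks.
	for range statsC {
	}
}
```

Closing done unblocks the receive inside the helper goroutine that Stats spawns, which closes the read side of the pipe; the decode loop then errors out, Stats returns, and the deferred close of the stats channel ends the drain loop.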
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md deleted file mode 100644 index 53d27d449..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md +++ /dev/null @@ -1,358 +0,0 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc] - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not -yet stable (pre 1.0). Logrus itself is completely stable and has been used in -many large deployments. The core API is unlikely to change much but please -version control your Logrus to make sure you aren't fetching latest `master` on -every build.** - -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.Formatter = new(&log.TextFormatter{})` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text -time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -exit status 1 -``` - -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/Sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` -and you'll now have the flexibility of Logrus. 
You can customize it all you
-want:
-
-```go
-package main
-
-import (
-	"os"
-	log "github.com/Sirupsen/logrus"
-)
-
-func init() {
-	// Log as JSON instead of the default ASCII formatter.
-	log.SetFormatter(&log.JSONFormatter{})
-
-	// Output to stderr instead of stdout, could also be a file.
-	log.SetOutput(os.Stderr)
-
-	// Only log the warning severity or above.
-	log.SetLevel(log.WarnLevel)
-}
-
-func main() {
-	log.WithFields(log.Fields{
-		"animal": "walrus",
-		"size": 10,
-	}).Info("A group of walrus emerges from the ocean")
-
-	log.WithFields(log.Fields{
-		"omg": true,
-		"number": 122,
-	}).Warn("The group's number increased tremendously!")
-
-	log.WithFields(log.Fields{
-		"omg": true,
-		"number": 100,
-	}).Fatal("The ice breaks!")
-
-	// A common pattern is to re-use fields between logging statements by re-using
-	// the logrus.Entry returned from WithFields()
-	contextLogger := log.WithFields(log.Fields{
-		"common": "this is a common field",
-		"other": "I also should be logged always",
-	})
-
-	contextLogger.Info("I'll be logged with common and other field")
-	contextLogger.Info("Me too")
-}
-```
-
-For more advanced usage such as logging to multiple locations from the same
-application, you can also create an instance of the `logrus` Logger:
-
-```go
-package main
-
-import (
-	"os"
-
-	"github.com/Sirupsen/logrus"
-)
-
-// Create a new instance of the logger. You can have any number of instances.
-var log = logrus.New()
-
-func main() {
-	// The API for setting attributes is a little different than the package level
-	// exported logger. See Godoc.
-	log.Out = os.Stderr
-
-	log.WithFields(logrus.Fields{
-		"animal": "walrus",
-		"size": 10,
-	}).Info("A group of walrus emerges from the ocean")
-}
-```
-
-#### Fields
-
-Logrus encourages careful, structured logging through logging fields instead of
-long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
-to send event %s to topic %s with key %d")`, you should log the much more
-discoverable:
-
-```go
-log.WithFields(log.Fields{
-	"event": event,
-	"topic": topic,
-	"key": key,
-}).Fatal("Failed to send event")
-```
-
-We've found this API forces you to think about logging in a way that produces
-much more useful logging messages. We've been in countless situations where just
-a single added field to a log statement that was already there would've saved us
-hours. The `WithFields` call is optional.
-
-In general, with Logrus using any of the `printf`-family functions should be
-seen as a hint that you should add a field; however, you can still use the
-`printf`-family functions with Logrus.
-
-#### Hooks
-
-You can add hooks for logging levels. For example, to send errors to an exception
-tracking service on `Error`, `Fatal` and `Panic`, info to StatsD, or log to
-multiple places simultaneously, e.g. syslog.
-
-Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
-`init`:
-
-```go
-import (
-	log "github.com/Sirupsen/logrus"
-	"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
-	logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
-	"log/syslog"
-)
-
-func init() {
-
-	// Use the Airbrake hook to report errors that have Error severity or above to
-	// an exception tracker. You can create custom hooks, see the Hooks section.
-	log.AddHook(airbrake.NewHook(123, "xyz", "production"))
-
-	hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
-	if err != nil {
-		log.Error("Unable to connect to local syslog daemon")
-	} else {
-		log.AddHook(hook)
-	}
-}
-```
-Note: the Syslog hook also supports connecting to local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
-
-| Hook | Description |
-| ----- | ----------- |
-| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
-| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
-| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
-| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
-| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
-| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
-| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
-| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
-| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
-| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
-| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
-| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
-| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
-| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
-| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
-| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
-| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
-| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
-| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
-| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
-
-#### Level logging
-
-Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
-
-```go
-log.Debug("Useful debugging information.")
-log.Info("Something noteworthy happened!")
-log.Warn("You should probably take a look at this.")
-log.Error("Something failed but I'm not quitting.")
-// Calls os.Exit(1) after logging
-log.Fatal("Bye.")
-// Calls panic() after logging
-log.Panic("I'm bailing.")
-```
-
-You can set the logging level on a `Logger`, then it will only log entries with
-that severity or anything above it:
-
-```go
-// Will log anything that is info or above (warn, error, fatal, panic). Default.
-log.SetLevel(log.InfoLevel)
-```
-
-It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
-environment if your application has that.
-
-#### Entries
-
-Besides the fields added with `WithField` or `WithFields` some fields are
-automatically added to all logging events:
-
-1. `time`. The timestamp when the entry was created.
-2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
-   the `WithFields` call. E.g. `Failed to send event.`
-3. `level`. The logging level. E.g. `info`.
-
-#### Environments
-
-Logrus has no notion of environment.
-
-If you wish for hooks and formatters to only be used in specific environments,
-you should handle that yourself. For example, if your application has a global
-variable `Environment`, which is a string representation of the environment you
-could do:
-
-```go
-import (
-	log "github.com/Sirupsen/logrus"
-)
-
-func init() {
-	// do something here to set environment depending on an environment variable
-	// or command-line flag
-	if Environment == "production" {
-		log.SetFormatter(&log.JSONFormatter{})
-	} else {
-		// The TextFormatter is default, you don't actually have to do this.
-		log.SetFormatter(&log.TextFormatter{})
-	}
-}
-```
-
-This configuration is how `logrus` was intended to be used, but JSON in
-production is mostly only useful if you do log aggregation with tools like
-Splunk or Logstash.
-
-#### Formatters
-
-The built-in logging formatters are:
-
-* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
-  without colors.
-  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
-    field to `true`. To force no colored output even if there is a TTY, set the
-    `DisableColors` field to `true`.
-* `logrus.JSONFormatter`. Logs fields as JSON.
-* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events.
-
-  ```go
-  logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"})
-  ```
-
-Third party logging formatters:
-
-* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
-
-You can define your formatter by implementing the `Formatter` interface,
-requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
-`Fields` type (`map[string]interface{}`) with all your fields as well as the
-default ones (see Entries section above):
-
-```go
-type MyJSONFormatter struct {
-}
-
-log.SetFormatter(new(MyJSONFormatter))
-
-func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
-	// Note this doesn't include Time, Level and Message which are available on
-	// the Entry. Consult `godoc` on information about those fields or read the
-	// source of the official loggers.
-	serialized, err := json.Marshal(entry.Data)
-	if err != nil {
-		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
-	}
-	return append(serialized, '\n'), nil
-}
-```
-
-#### Logger as an `io.Writer`
-
-Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
-
-```go
-w := logger.Writer()
-defer w.Close()
-
-srv := http.Server{
-	// create a stdlib log.Logger that writes to
-	// logrus.Logger.
-	ErrorLog: log.New(w, "", 0),
-}
-```
-
-Each line written to that writer will be printed the usual way, using formatters
-and hooks. The level for those entries is `info`.
-
-#### Rotation
-
-Log rotation is not provided with Logrus.
Log rotation should be done by an -external program (like `logrotate(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger. - - -[godoc]: https://godoc.org/github.com/Sirupsen/logrus diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/doc.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/doc.go deleted file mode 100644 index dddd5f877..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Package logrus is a structured logger for Go, completely API compatible with the standard library logger. - - -The simplest way to use Logrus is simply the package-level exported logger: - - package main - - import ( - log "github.com/Sirupsen/logrus" - ) - - func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "number": 1, - "size": 10, - }).Info("A walrus appears") - } - -Output: - time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 - -For a full guide visit https://github.com/Sirupsen/logrus -*/ -package logrus diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go deleted file mode 100644 index 699ea035c..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go +++ /dev/null @@ -1,254 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "io" - "os" - "time" -) - -// An entry is the final or intermediate Logrus logging entry. It contains all -// the fields passed with WithField{,s}. It's finally logged when Debug, Info, -// Warn, Error, Fatal or Panic is called on it. These objects can be reused and -// passed around as much as you wish to avoid field duplication. -type Entry struct { - Logger *Logger - - // Contains all the fields set by the user. - Data Fields - - // Time at which the log entry was created - Time time.Time - - // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic - Level Level - - // Message passed to Debug, Info, Warn, Error, Fatal or Panic - Message string -} - -func NewEntry(logger *Logger) *Entry { - return &Entry{ - Logger: logger, - // Default is three fields, give a little extra room - Data: make(Fields, 5), - } -} - -// Returns a reader for the entry, which is a proxy to the formatter. -func (entry *Entry) Reader() (*bytes.Buffer, error) { - serialized, err := entry.Logger.Formatter.Format(entry) - return bytes.NewBuffer(serialized), err -} - -// Returns the string representation from the reader and ultimately the -// formatter. -func (entry *Entry) String() (string, error) { - reader, err := entry.Reader() - if err != nil { - return "", err - } - - return reader.String(), err -} - -// Add a single field to the Entry. -func (entry *Entry) WithField(key string, value interface{}) *Entry { - return entry.WithFields(Fields{key: value}) -} - -// Add a map of fields to the Entry. 
-func (entry *Entry) WithFields(fields Fields) *Entry { - data := Fields{} - for k, v := range entry.Data { - data[k] = v - } - for k, v := range fields { - data[k] = v - } - return &Entry{Logger: entry.Logger, Data: data} -} - -func (entry *Entry) log(level Level, msg string) { - entry.Time = time.Now() - entry.Level = level - entry.Message = msg - - if err := entry.Logger.Hooks.Fire(level, entry); err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) - entry.Logger.mu.Unlock() - } - - reader, err := entry.Reader() - if err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - entry.Logger.mu.Unlock() - } - - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - - _, err = io.Copy(entry.Logger.Out, reader) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } - - // To avoid Entry#log() returning a value that only would make sense for - // panic() to use in Entry#Panic(), we avoid the allocation by checking - // directly here. - if level <= PanicLevel { - panic(entry) - } -} - -func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.log(DebugLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Print(args ...interface{}) { - entry.Info(args...) -} - -func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.log(InfoLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.log(WarnLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warning(args ...interface{}) { - entry.Warn(args...) -} - -func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.log(ErrorLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.log(FatalLevel, fmt.Sprint(args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.log(PanicLevel, fmt.Sprint(args...)) - } - panic(fmt.Sprint(args...)) -} - -// Entry Printf family functions - -func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Printf(format string, args ...interface{}) { - entry.Infof(format, args...) -} - -func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Warningf(format string, args ...interface{}) { - entry.Warnf(format, args...) 
-} - -func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(fmt.Sprintf(format, args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(fmt.Sprintf(format, args...)) - } -} - -// Entry Println family functions - -func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Println(args ...interface{}) { - entry.Infoln(args...) -} - -func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Warningln(args ...interface{}) { - entry.Warnln(args...) -} - -func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(entry.sprintlnn(args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(entry.sprintlnn(args...)) - } -} - -// Sprintlnn => Sprint no newline. This is to get the behavior of how -// fmt.Sprintln where spaces are always added between operands, regardless of -// their type. Instead of vendoring the Sprintln implementation to spare a -// string allocation, we do the simplest thing. -func (entry *Entry) sprintlnn(args ...interface{}) string { - msg := fmt.Sprintln(args...) - return msg[:len(msg)-1] -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go deleted file mode 100644 index a67e1b802..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go +++ /dev/null @@ -1,188 +0,0 @@ -package logrus - -import ( - "io" -) - -var ( - // std is the name of the standard logger in stdlib `log` - std = New() -) - -func StandardLogger() *Logger { - return std -} - -// SetOutput sets the standard logger output. -func SetOutput(out io.Writer) { - std.mu.Lock() - defer std.mu.Unlock() - std.Out = out -} - -// SetFormatter sets the standard logger formatter. -func SetFormatter(formatter Formatter) { - std.mu.Lock() - defer std.mu.Unlock() - std.Formatter = formatter -} - -// SetLevel sets the standard logger level. -func SetLevel(level Level) { - std.mu.Lock() - defer std.mu.Unlock() - std.Level = level -} - -// GetLevel returns the standard logger level. -func GetLevel() Level { - std.mu.Lock() - defer std.mu.Unlock() - return std.Level -} - -// AddHook adds a hook to the standard logger hooks. -func AddHook(hook Hook) { - std.mu.Lock() - defer std.mu.Unlock() - std.Hooks.Add(hook) -} - -// WithField creates an entry from the standard logger and adds a field to -// it. If you want multiple fields, use `WithFields`. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. 
-func WithField(key string, value interface{}) *Entry { - return std.WithField(key, value) -} - -// WithFields creates an entry from the standard logger and adds multiple -// fields to it. This is simply a helper for `WithField`, invoking it -// once for each field. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithFields(fields Fields) *Entry { - return std.WithFields(fields) -} - -// Debug logs a message at level Debug on the standard logger. -func Debug(args ...interface{}) { - std.Debug(args...) -} - -// Print logs a message at level Info on the standard logger. -func Print(args ...interface{}) { - std.Print(args...) -} - -// Info logs a message at level Info on the standard logger. -func Info(args ...interface{}) { - std.Info(args...) -} - -// Warn logs a message at level Warn on the standard logger. -func Warn(args ...interface{}) { - std.Warn(args...) -} - -// Warning logs a message at level Warn on the standard logger. -func Warning(args ...interface{}) { - std.Warning(args...) -} - -// Error logs a message at level Error on the standard logger. -func Error(args ...interface{}) { - std.Error(args...) -} - -// Panic logs a message at level Panic on the standard logger. -func Panic(args ...interface{}) { - std.Panic(args...) -} - -// Fatal logs a message at level Fatal on the standard logger. -func Fatal(args ...interface{}) { - std.Fatal(args...) -} - -// Debugf logs a message at level Debug on the standard logger. -func Debugf(format string, args ...interface{}) { - std.Debugf(format, args...) -} - -// Printf logs a message at level Info on the standard logger. -func Printf(format string, args ...interface{}) { - std.Printf(format, args...) -} - -// Infof logs a message at level Info on the standard logger. -func Infof(format string, args ...interface{}) { - std.Infof(format, args...) -} - -// Warnf logs a message at level Warn on the standard logger. -func Warnf(format string, args ...interface{}) { - std.Warnf(format, args...) -} - -// Warningf logs a message at level Warn on the standard logger. -func Warningf(format string, args ...interface{}) { - std.Warningf(format, args...) -} - -// Errorf logs a message at level Error on the standard logger. -func Errorf(format string, args ...interface{}) { - std.Errorf(format, args...) -} - -// Panicf logs a message at level Panic on the standard logger. -func Panicf(format string, args ...interface{}) { - std.Panicf(format, args...) -} - -// Fatalf logs a message at level Fatal on the standard logger. -func Fatalf(format string, args ...interface{}) { - std.Fatalf(format, args...) -} - -// Debugln logs a message at level Debug on the standard logger. -func Debugln(args ...interface{}) { - std.Debugln(args...) -} - -// Println logs a message at level Info on the standard logger. -func Println(args ...interface{}) { - std.Println(args...) -} - -// Infoln logs a message at level Info on the standard logger. -func Infoln(args ...interface{}) { - std.Infoln(args...) -} - -// Warnln logs a message at level Warn on the standard logger. -func Warnln(args ...interface{}) { - std.Warnln(args...) -} - -// Warningln logs a message at level Warn on the standard logger. -func Warningln(args ...interface{}) { - std.Warningln(args...) -} - -// Errorln logs a message at level Error on the standard logger. -func Errorln(args ...interface{}) { - std.Errorln(args...) -} - -// Panicln logs a message at level Panic on the standard logger. 
-func Panicln(args ...interface{}) { - std.Panicln(args...) -} - -// Fatalln logs a message at level Fatal on the standard logger. -func Fatalln(args ...interface{}) { - std.Fatalln(args...) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go deleted file mode 100644 index 104d689f1..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go +++ /dev/null @@ -1,48 +0,0 @@ -package logrus - -import "time" - -const DefaultTimestampFormat = time.RFC3339 - -// The Formatter interface is used to implement a custom Formatter. It takes an -// `Entry`. It exposes all the fields, including the default ones: -// -// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. -// * `entry.Data["time"]`. The timestamp. -// * `entry.Data["level"]. The level the entry was logged at. -// -// Any additional fields added with `WithField` or `WithFields` are also in -// `entry.Data`. Format is expected to return an array of bytes which are then -// logged to `logger.Out`. -type Formatter interface { - Format(*Entry) ([]byte, error) -} - -// This is to not silently overwrite `time`, `msg` and `level` fields when -// dumping it. If this code wasn't there doing: -// -// logrus.WithField("level", 1).Info("hello") -// -// Would just silently drop the user provided level. Instead with this code -// it'll logged as: -// -// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} -// -// It's not exported because it's still using Data in an opinionated way. It's to -// avoid code duplication between the two default formatters. -func prefixFieldClashes(data Fields) { - _, ok := data["time"] - if ok { - data["fields.time"] = data["time"] - } - - _, ok = data["msg"] - if ok { - data["fields.msg"] = data["msg"] - } - - _, ok = data["level"] - if ok { - data["fields.level"] = data["level"] - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go deleted file mode 100644 index 3f151cdc3..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go +++ /dev/null @@ -1,34 +0,0 @@ -package logrus - -// A hook to be fired when logging on the logging levels returned from -// `Levels()` on your implementation of the interface. Note that this is not -// fired in a goroutine or a channel with workers, you should handle such -// functionality yourself if your call is non-blocking and you don't wish for -// the logging calls for levels returned from `Levels()` to block. -type Hook interface { - Levels() []Level - Fire(*Entry) error -} - -// Internal type for storing the hooks on a logger instance. -type LevelHooks map[Level][]Hook - -// Add a hook to an instance of logger. This is called with -// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. -func (hooks LevelHooks) Add(hook Hook) { - for _, level := range hook.Levels() { - hooks[level] = append(hooks[level], hook) - } -} - -// Fire all the hooks for the passed level. Used by `entry.log` to fire -// appropriate hooks for a log entry. 
-func (hooks LevelHooks) Fire(level Level, entry *Entry) error { - for _, hook := range hooks[level] { - if err := hook.Fire(entry); err != nil { - return err - } - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go deleted file mode 100644 index 2ad6dc5cf..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go +++ /dev/null @@ -1,41 +0,0 @@ -package logrus - -import ( - "encoding/json" - "fmt" -) - -type JSONFormatter struct { - // TimestampFormat sets the format used for marshaling timestamps. - TimestampFormat string -} - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+3) - for k, v := range entry.Data { - switch v := v.(type) { - case error: - // Otherwise errors are ignored by `encoding/json` - // https://github.com/Sirupsen/logrus/issues/137 - data[k] = v.Error() - default: - data[k] = v - } - } - prefixFieldClashes(data) - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat - } - - data["time"] = entry.Time.Format(timestampFormat) - data["msg"] = entry.Message - data["level"] = entry.Level.String() - - serialized, err := json.Marshal(data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go deleted file mode 100644 index e4974bfbe..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go +++ /dev/null @@ -1,206 +0,0 @@ -package logrus - -import ( - "io" - "os" - "sync" -) - -type Logger struct { - // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a - // file, or leave it default which is `os.Stdout`. You can also set this to - // something more adventurous, such as logging to Kafka. - Out io.Writer - // Hooks for the logger instance. These allow firing events based on logging - // levels and log entries. For example, to send errors to an error tracking - // service, log to StatsD or dump the core on fatal errors. - Hooks LevelHooks - // All log entries pass through the formatter before being logged to Out. The - // included formatters are `TextFormatter` and `JSONFormatter`, of which - // TextFormatter is the default. In development (when a TTY is attached) it - // logs with colors, but not when writing to a file. You can easily implement - // your own formatter that implements the `Formatter` interface; see the - // `README` or included formatters for examples. - Formatter Formatter - // The logging level the logger should log at. This is typically (and defaults - // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be - // logged. `logrus.Debug` is useful in development. - Level Level - // Used to sync writing to the log. - mu sync.Mutex -} - -// Creates a new logger. Configuration should be set by changing `Formatter`, -// `Out` and `Hooks` directly on the default logger instance. 
You can also just -// instantiate your own: -// -// var log = &Logger{ -// Out: os.Stderr, -// Formatter: new(JSONFormatter), -// Hooks: make(LevelHooks), -// Level: logrus.DebugLevel, -// } -// -// It's recommended to make this a global instance called `log`. -func New() *Logger { - return &Logger{ - Out: os.Stderr, - Formatter: new(TextFormatter), - Hooks: make(LevelHooks), - Level: InfoLevel, - } -} - -// Adds a field to the log entry; note that it doesn't log until you call -// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. -// If you want multiple fields, use `WithFields`. -func (logger *Logger) WithField(key string, value interface{}) *Entry { - return NewEntry(logger).WithField(key, value) -} - -// Adds a struct of fields to the log entry. All it does is call `WithField` for -// each `Field`. -func (logger *Logger) WithFields(fields Fields) *Entry { - return NewEntry(logger).WithFields(fields) -} - -func (logger *Logger) Debugf(format string, args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debugf(format, args...) - } -} - -func (logger *Logger) Infof(format string, args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Infof(format, args...) - } -} - -func (logger *Logger) Printf(format string, args ...interface{}) { - NewEntry(logger).Printf(format, args...) -} - -func (logger *Logger) Warnf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnf(format, args...) - } -} - -func (logger *Logger) Warningf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnf(format, args...) - } -} - -func (logger *Logger) Errorf(format string, args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorf(format, args...) - } -} - -func (logger *Logger) Fatalf(format string, args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalf(format, args...) - } - os.Exit(1) -} - -func (logger *Logger) Panicf(format string, args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicf(format, args...) - } -} - -func (logger *Logger) Debug(args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debug(args...) - } -} - -func (logger *Logger) Info(args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Info(args...) - } -} - -func (logger *Logger) Print(args ...interface{}) { - NewEntry(logger).Info(args...) -} - -func (logger *Logger) Warn(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warn(args...) - } -} - -func (logger *Logger) Warning(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warn(args...) - } -} - -func (logger *Logger) Error(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Error(args...) - } -} - -func (logger *Logger) Fatal(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatal(args...) - } - os.Exit(1) -} - -func (logger *Logger) Panic(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panic(args...) - } -} - -func (logger *Logger) Debugln(args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debugln(args...) - } -} - -func (logger *Logger) Infoln(args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Infoln(args...) - } -} - -func (logger *Logger) Println(args ...interface{}) { - NewEntry(logger).Println(args...) 
-} - -func (logger *Logger) Warnln(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnln(args...) - } -} - -func (logger *Logger) Warningln(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnln(args...) - } -} - -func (logger *Logger) Errorln(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorln(args...) - } -} - -func (logger *Logger) Fatalln(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalln(args...) - } - os.Exit(1) -} - -func (logger *Logger) Panicln(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicln(args...) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go deleted file mode 100644 index 43ee12e90..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go +++ /dev/null @@ -1,94 +0,0 @@ -package logrus - -import ( - "fmt" - "log" -) - -// Fields type, used to pass to `WithFields`. -type Fields map[string]interface{} - -// Level type -type Level uint8 - -// Convert the Level to a string. E.g. PanicLevel becomes "panic". -func (level Level) String() string { - switch level { - case DebugLevel: - return "debug" - case InfoLevel: - return "info" - case WarnLevel: - return "warning" - case ErrorLevel: - return "error" - case FatalLevel: - return "fatal" - case PanicLevel: - return "panic" - } - - return "unknown" -} - -// ParseLevel takes a string level and returns the Logrus log level constant. -func ParseLevel(lvl string) (Level, error) { - switch lvl { - case "panic": - return PanicLevel, nil - case "fatal": - return FatalLevel, nil - case "error": - return ErrorLevel, nil - case "warn", "warning": - return WarnLevel, nil - case "info": - return InfoLevel, nil - case "debug": - return DebugLevel, nil - } - - var l Level - return l, fmt.Errorf("not a valid logrus Level: %q", lvl) -} - -// These are the different logging levels. You can set the logging level to log -// on your instance of logger, obtained with `logrus.New()`. -const ( - // PanicLevel level, highest level of severity. Logs and then calls panic with the - // message passed to Debug, Info, ... - PanicLevel Level = iota - // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the - // logging level is set to Panic. - FatalLevel - // ErrorLevel level. Logs. Used for errors that should definitely be noted. - // Commonly used for hooks to send errors to an error tracking service. - ErrorLevel - // WarnLevel level. Non-critical entries that deserve eyes. - WarnLevel - // InfoLevel level. General operational entries about what's going on inside the - // application. - InfoLevel - // DebugLevel level. Usually only enabled when debugging. Very verbose logging. - DebugLevel -) - -// Won't compile if StdLogger can't be realized by a log.Logger -var _ StdLogger = &log.Logger{} - -// StdLogger is what your logrus-enabled library should take, that way -// it'll accept a stdlib logger and a logrus logger. There's no standard -// interface, this is the closest we get, unfortunately. 
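The value of `StdLogger` (defined just below) is interchangeability: a function written against it accepts either the standard library's `*log.Logger` or a `*logrus.Logger`. A sketch, with hypothetical `Report` and `demo` functions:

```
package mylib

import (
	"log"
	"os"

	"github.com/Sirupsen/logrus"
)

// Report accepts anything satisfying StdLogger, so callers choose
// their logging implementation.
func Report(l logrus.StdLogger, msg string) {
	l.Printf("report: %s", msg)
}

func demo() {
	Report(log.New(os.Stderr, "", log.LstdFlags), "from the stdlib logger")
	Report(logrus.New(), "from a logrus logger")
}
```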
-type StdLogger interface { - Print(...interface{}) - Printf(string, ...interface{}) - Println(...interface{}) - - Fatal(...interface{}) - Fatalf(string, ...interface{}) - Fatalln(...interface{}) - - Panic(...interface{}) - Panicf(string, ...interface{}) - Panicln(...interface{}) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go deleted file mode 100644 index 71f8d67a5..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build darwin freebsd openbsd netbsd dragonfly - -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go deleted file mode 100644 index a2c0b40db..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go +++ /dev/null @@ -1,12 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TCGETS - -type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go deleted file mode 100644 index 4bb537602..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go +++ /dev/null @@ -1,21 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux darwin freebsd openbsd netbsd dragonfly - -package logrus - -import ( - "syscall" - "unsafe" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stdout - var termios Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go deleted file mode 100644 index 2e09f6f7e..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build windows - -package logrus - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stdout - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go deleted file mode 100644 index 06ef20233..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go +++ /dev/null @@ -1,161 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "runtime" - "sort" - "strings" - "time" -) - -const ( - nocolor = 0 - red = 31 - green = 32 - yellow = 33 - blue = 34 - gray = 37 -) - -var ( - baseTimestamp time.Time - isTerminal bool -) - -func init() { - baseTimestamp = time.Now() - isTerminal = IsTerminal() -} - -func miniTS() int { - return int(time.Since(baseTimestamp) / time.Second) -} - -type TextFormatter struct { - // Set to true to bypass checking for a TTY before outputting colors. - ForceColors bool - - // Force disabling colors. - DisableColors bool - - // Disable timestamp logging. Useful when output is redirected to a logging - // system that already adds timestamps. - DisableTimestamp bool - - // Enable logging the full timestamp when a TTY is attached instead of just - // the time passed since beginning of execution. - FullTimestamp bool - - // TimestampFormat to use for display when a full timestamp is printed - TimestampFormat string - - // The fields are sorted by default for a consistent output. For applications - // that log extremely frequently and don't use the JSON formatter this may not - // be desired. 
- DisableSorting bool -} - -func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - keys := make([]string, 0, len(entry.Data)) - for k := range entry.Data { - keys = append(keys, k) - } - - if !f.DisableSorting { - sort.Strings(keys) - } - - b := &bytes.Buffer{} - - prefixFieldClashes(entry.Data) - - isColorTerminal := isTerminal && (runtime.GOOS != "windows") - isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat - } - if isColored { - f.printColored(b, entry, keys, timestampFormat) - } else { - if !f.DisableTimestamp { - f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) - } - f.appendKeyValue(b, "level", entry.Level.String()) - if entry.Message != "" { - f.appendKeyValue(b, "msg", entry.Message) - } - for _, key := range keys { - f.appendKeyValue(b, key, entry.Data[key]) - } - } - - b.WriteByte('\n') - return b.Bytes(), nil -} - -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { - var levelColor int - switch entry.Level { - case DebugLevel: - levelColor = gray - case WarnLevel: - levelColor = yellow - case ErrorLevel, FatalLevel, PanicLevel: - levelColor = red - default: - levelColor = blue - } - - levelText := strings.ToUpper(entry.Level.String())[0:4] - - if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) - } else { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) - } - for _, k := range keys { - v := entry.Data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v) - } -} - -func needsQuoting(text string) bool { - for _, ch := range text { - if !((ch >= 'a' && ch <= 'z') || - (ch >= 'A' && ch <= 'Z') || - (ch >= '0' && ch <= '9') || - ch == '-' || ch == '.') { - return true - } - } - return false -} - -func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { - b.WriteString(key) - b.WriteByte('=') - - switch value := value.(type) { - case string: - if !needsQuoting(value) { - b.WriteString(value) - } else { - fmt.Fprintf(b, "%q", value) - } - case error: - errmsg := value.Error() - if !needsQuoting(errmsg) { - b.WriteString(errmsg) - } else { - fmt.Fprintf(b, "%q", errmsg) - } - default: - fmt.Fprint(b, value) - } - - b.WriteByte(' ') -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go deleted file mode 100644 index 1e30b1c75..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go +++ /dev/null @@ -1,31 +0,0 @@ -package logrus - -import ( - "bufio" - "io" - "runtime" -) - -func (logger *Logger) Writer() *io.PipeWriter { - reader, writer := io.Pipe() - - go logger.writerScanner(reader) - runtime.SetFinalizer(writer, writerFinalizer) - - return writer -} - -func (logger *Logger) writerScanner(reader *io.PipeReader) { - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - logger.Print(scanner.Text()) - } - if err := scanner.Err(); err != nil { - logger.Errorf("Error while reading from Writer: %s", err) - } - reader.Close() -} - -func writerFinalizer(writer *io.PipeWriter) { - writer.Close() -} diff --git 
a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode/errors.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode/errors.go deleted file mode 100644 index fdaddbcf8..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode/errors.go +++ /dev/null @@ -1,259 +0,0 @@ -package errcode - -import ( - "encoding/json" - "fmt" - "strings" -) - -// ErrorCoder is the base interface for ErrorCode and Error, allowing -// users of each to just call ErrorCode to get the real ID of each. -type ErrorCoder interface { - ErrorCode() ErrorCode -} - -// ErrorCode represents the error type. The errors are serialized via strings -// and the integer format may change and should *never* be exported. -type ErrorCode int - -var _ error = ErrorCode(0) - -// ErrorCode just returns itself -func (ec ErrorCode) ErrorCode() ErrorCode { - return ec -} - -// Error returns the ID/Value -func (ec ErrorCode) Error() string { - return ec.Descriptor().Value -} - -// Descriptor returns the descriptor for the error code. -func (ec ErrorCode) Descriptor() ErrorDescriptor { - d, ok := errorCodeToDescriptors[ec] - - if !ok { - return ErrorCodeUnknown.Descriptor() - } - - return d -} - -// String returns the canonical identifier for this error code. -func (ec ErrorCode) String() string { - return ec.Descriptor().Value -} - -// Message returns the human-readable error message for this error code. -func (ec ErrorCode) Message() string { - return ec.Descriptor().Message -} - -// MarshalText encodes the receiver into UTF-8-encoded text and returns the -// result. -func (ec ErrorCode) MarshalText() (text []byte, err error) { - return []byte(ec.String()), nil -} - -// UnmarshalText decodes the form generated by MarshalText. -func (ec *ErrorCode) UnmarshalText(text []byte) error { - desc, ok := idToDescriptors[string(text)] - - if !ok { - desc = ErrorCodeUnknown.Descriptor() - } - - *ec = desc.Code - - return nil -} - -// WithDetail creates a new Error struct based on the passed-in info and -// sets the Detail property appropriately -func (ec ErrorCode) WithDetail(detail interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithDetail(detail) -} - -// WithArgs creates a new Error struct and sets the Args slice -func (ec ErrorCode) WithArgs(args ...interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithArgs(args...) -} - -// Error provides a wrapper around ErrorCode with extra Details provided. -type Error struct { - Code ErrorCode `json:"code"` - Message string `json:"message"` - Detail interface{} `json:"detail,omitempty"` - - // TODO(duglin): See if we need an "args" property so we can do the - // variable substitution right before showing the message to the user -} - -var _ error = Error{} - -// ErrorCode returns the ID/Value of this Error -func (e Error) ErrorCode() ErrorCode { - return e.Code -} - -// Error returns a human-readable representation of the error. 
-func (e Error) Error() string { - return fmt.Sprintf("%s: %s", - strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)), - e.Message) -} - -// WithDetail will return a new Error, based on the current one, but with -// some Detail info added -func (e Error) WithDetail(detail interface{}) Error { - return Error{ - Code: e.Code, - Message: e.Message, - Detail: detail, - } -} - -// WithArgs uses the passed-in list of interface{} as the substitution -// variables in the Error's Message string, but returns a new Error -func (e Error) WithArgs(args ...interface{}) Error { - return Error{ - Code: e.Code, - Message: fmt.Sprintf(e.Code.Message(), args...), - Detail: e.Detail, - } -} - -// ErrorDescriptor provides relevant information about a given error code. -type ErrorDescriptor struct { - // Code is the error code that this descriptor describes. - Code ErrorCode - - // Value provides a unique string key, often capitalized with - // underscores, to identify the error code. This value is used as the - // keyed value when serializing api errors. - Value string - - // Message is a short, human-readable description of the error condition - // included in API responses. - Message string - - // Description provides a complete account of the error's purpose, suitable - // for use in documentation. - Description string - - // HTTPStatusCode provides the HTTP status code that is associated with - // this error condition. - HTTPStatusCode int -} - -// ParseErrorCode returns the error code matching the given string value. -// `ErrorCodeUnknown` will be returned if the error is not known. -func ParseErrorCode(value string) ErrorCode { - ed, ok := idToDescriptors[value] - if ok { - return ed.Code - } - - return ErrorCodeUnknown -} - -// Errors provides the envelope for multiple errors and a few sugar methods -// for use within the application. -type Errors []error - -var _ error = Errors{} - -func (errs Errors) Error() string { - switch len(errs) { - case 0: - return "" - case 1: - return errs[0].Error() - default: - msg := "errors:\n" - for _, err := range errs { - msg += err.Error() + "\n" - } - return msg - } -} - -// Len returns the current number of errors. 
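To make the descriptor fields concrete: a sketch of turning a registered code into a populated `Error` via `WithDetail` (using `ErrorCodeUnknown`, which is registered later in this package; the detail map is hypothetical):

```
package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode"
)

func main() {
	// WithDetail attaches free-form context; the Message comes from
	// the descriptor registered for the code.
	err := errcode.ErrorCodeUnknown.WithDetail(map[string]string{"id": "abc123"})

	fmt.Println(err.Error())                                 // "unknown: unknown error"
	fmt.Println(err.ErrorCode() == errcode.ErrorCodeUnknown) // true
}
```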
-func (errs Errors) Len() int { - return len(errs) -} - -// MarshalJSON converts a slice of error, ErrorCode or Error into a -// slice of Error - then serializes it -func (errs Errors) MarshalJSON() ([]byte, error) { - var tmpErrs struct { - Errors []Error `json:"errors,omitempty"` - } - - for _, daErr := range errs { - var err Error - - switch daErr.(type) { - case ErrorCode: - err = daErr.(ErrorCode).WithDetail(nil) - case Error: - err = daErr.(Error) - default: - err = ErrorCodeUnknown.WithDetail(daErr) - - } - - // If the Error struct was set up and they forgot to set the - // Message field (meaning it's "") then grab it from the ErrorCode - msg := err.Message - if msg == "" { - msg = err.Code.Message() - } - - tmpErrs.Errors = append(tmpErrs.Errors, Error{ - Code: err.Code, - Message: msg, - Detail: err.Detail, - }) - } - - return json.Marshal(tmpErrs) -} - -// UnmarshalJSON deserializes []Error and then converts it into a slice of -// Error or ErrorCode -func (errs *Errors) UnmarshalJSON(data []byte) error { - var tmpErrs struct { - Errors []Error - } - - if err := json.Unmarshal(data, &tmpErrs); err != nil { - return err - } - - var newErrs Errors - for _, daErr := range tmpErrs.Errors { - // If Message is empty or exactly matches the Code's message string - // then just use the Code, no need for a full Error struct - if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { - // Errors w/o details get converted to ErrorCode - newErrs = append(newErrs, daErr.Code) - } else { - // Errors w/ details are untouched - newErrs = append(newErrs, Error{ - Code: daErr.Code, - Message: daErr.Message, - Detail: daErr.Detail, - }) - } - } - - *errs = newErrs - return nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode/handler.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode/handler.go deleted file mode 100644 index 49a64a86e..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode/handler.go +++ /dev/null @@ -1,44 +0,0 @@ -package errcode - -import ( - "encoding/json" - "net/http" -) - -// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err -// and sets the content-type header to 'application/json'. It will handle -// ErrorCoder and Errors, and if necessary will create an envelope. -func ServeJSON(w http.ResponseWriter, err error) error { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - var sc int - - switch errs := err.(type) { - case Errors: - if len(errs) < 1 { - break - } - - if err, ok := errs[0].(ErrorCoder); ok { - sc = err.ErrorCode().Descriptor().HTTPStatusCode - } - case ErrorCoder: - sc = errs.ErrorCode().Descriptor().HTTPStatusCode - err = Errors{err} // create an envelope. - default: - // We just have an unhandled error type, so just place in an envelope - // and move along. 
- err = Errors{err} - } - - if sc == 0 { - sc = http.StatusInternalServerError - } - - w.WriteHeader(sc) - - if err := json.NewEncoder(w).Encode(err); err != nil { - return err - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode/register.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode/register.go deleted file mode 100644 index 01c34384b..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode/register.go +++ /dev/null @@ -1,128 +0,0 @@ -package errcode - -import ( - "fmt" - "net/http" - "sort" - "sync" -) - -var ( - errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} - idToDescriptors = map[string]ErrorDescriptor{} - groupToDescriptors = map[string][]ErrorDescriptor{} -) - -var ( - // ErrorCodeUnknown is a generic error that can be used as a last - // resort if there is no situation-specific error message that can be used. - ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ - Value: "UNKNOWN", - Message: "unknown error", - Description: `Generic error returned when the error does not have an - API classification.`, - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeUnsupported is returned when an operation is not supported. - ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ - Value: "UNSUPPORTED", - Message: "The operation is unsupported.", - Description: `The operation was unsupported due to a missing - implementation or invalid set of parameters.`, - HTTPStatusCode: http.StatusMethodNotAllowed, - }) - - // ErrorCodeUnauthorized is returned if a request requires - // authentication. - ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ - Value: "UNAUTHORIZED", - Message: "authentication required", - Description: `The access controller was unable to authenticate - the client. Often this will be accompanied by a - Www-Authenticate HTTP response header indicating how to - authenticate.`, - HTTPStatusCode: http.StatusUnauthorized, - }) - - // ErrorCodeDenied is returned if a client does not have sufficient - // permission to perform an action. - ErrorCodeDenied = Register("errcode", ErrorDescriptor{ - Value: "DENIED", - Message: "requested access to the resource is denied", - Description: `The access controller denied access for the - operation on a resource.`, - HTTPStatusCode: http.StatusForbidden, - }) - - // ErrorCodeUnavailable provides a common error to report unavailability - // of a service or endpoint. 
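A sketch of how `ServeJSON` above is meant to be used from an HTTP handler; the handler and `doWork` are hypothetical:

```
package main

import (
	"net/http"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode"
)

// doWork stands in for real request processing.
func doWork(r *http.Request) error {
	return errcode.ErrorCodeUnsupported.WithDetail(r.Method)
}

func handler(w http.ResponseWriter, r *http.Request) {
	// On failure, ServeJSON picks the status code from the error's
	// descriptor (405 here) and writes the JSON envelope.
	if err := doWork(r); err != nil {
		_ = errcode.ServeJSON(w, err)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}

func main() {
	http.HandleFunc("/", handler)
	_ = http.ListenAndServe(":8080", nil)
}
```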
- ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ - Value: "UNAVAILABLE", - Message: "service unavailable", - Description: "Returned when a service is not available", - HTTPStatusCode: http.StatusServiceUnavailable, - }) -) - -var nextCode = 1000 -var registerLock sync.Mutex - -// Register will make the passed-in error known to the environment and -// return a new ErrorCode -func Register(group string, descriptor ErrorDescriptor) ErrorCode { - registerLock.Lock() - defer registerLock.Unlock() - - descriptor.Code = ErrorCode(nextCode) - - if _, ok := idToDescriptors[descriptor.Value]; ok { - panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) - } - if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { - panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) - } - - groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) - errorCodeToDescriptors[descriptor.Code] = descriptor - idToDescriptors[descriptor.Value] = descriptor - - nextCode++ - return descriptor.Code -} - -type byValue []ErrorDescriptor - -func (a byValue) Len() int { return len(a) } -func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } - -// GetGroupNames returns the list of Error group names that are registered -func GetGroupNames() []string { - keys := []string{} - - for k := range groupToDescriptors { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// GetErrorCodeGroup returns the named group of error descriptors -func GetErrorCodeGroup(name string) []ErrorDescriptor { - desc := groupToDescriptors[name] - sort.Sort(byValue(desc)) - return desc -} - -// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are -// registered, irrespective of what group they're in -func GetErrorAllDescriptors() []ErrorDescriptor { - result := []ErrorDescriptor{} - - for _, group := range GetGroupNames() { - result = append(result, GetErrorCodeGroup(group)...) - } - sort.Sort(byValue(result)) - return result -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/README.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/README.md deleted file mode 100644 index 81fa04ccc..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/README.md +++ /dev/null @@ -1,58 +0,0 @@ -Docker 'errors' package -======================= - -This package contains all of the error messages generated by the Docker -engine that might be exposed via the Docker engine's REST API. - -Each top-level engine package will have its own file in this directory -so that there's a clear grouping of errors, instead of just one big -file. The errors for each package are defined here instead of within -their respective package structure so that Docker CLI code that may need -to import these error definition files will not need to know or understand -the engine's package/directory structure. In other words, all they should -need to do is import `.../docker/errors` and they will automatically -pick up all Docker engine defined errors. This also gives the engine -developers the freedom to change the engine packaging structure (e.g. to -CRUD packages) without worrying about breaking existing clients. - -These errors are defined using the 'errcode' package. 
The `errcode` package allows each error to be typed and to include all the information necessary for further processing. In particular, each error includes: - -* Value - a unique string (in all caps) associated with this error. -Typically, this string is the same name as the variable name of the error -(w/o the `ErrorCode` text) but in all caps. - -* Message - the human-readable sentence that will be displayed for this -error. It can contain '%s' substitutions that allow the code generating -the error to specify values that will be inserted in the string prior to -being displayed to the end-user. The `WithArgs()` function can be used to -specify the insertion strings. Note that the evaluation of the strings is -done at the time `WithArgs()` is called. - -* Description - additional human-readable text to further explain the -circumstances of the error situation. - -* HTTPStatusCode - when the error is returned back to a CLI, this value -will be used to populate the HTTP status code. If not present, the default -value will be `StatusInternalServerError` (500). - -Not all errors generated within the engine's executable will be propagated -back to the engine's API layer. For example, it is expected that errors -generated by vendored code (under `docker/vendor`) and packaged code -(under `docker/pkg`) will be converted into errors defined by this package. - -When processing an errcode error, if you are looking for a particular -error then you can do something like: - -``` -import derr "github.com/docker/docker/errors" - -... - -err := someFunc() -if err.ErrorCode() == derr.ErrorCodeNoSuchContainer { - ... -} -``` diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/builder.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/builder.go deleted file mode 100644 index 07a33bbf7..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/builder.go +++ /dev/null @@ -1,93 +0,0 @@ -package errors - -// This file contains all of the errors that can be generated from the -// docker/builder component. - -import ( - "net/http" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode" -) - -var ( - // ErrorCodeAtLeastOneArg is generated when the parser comes across a - // Dockerfile command that doesn't have any args. - ErrorCodeAtLeastOneArg = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "ATLEASTONEARG", - Message: "%s requires at least one argument", - Description: "The specified command requires at least one argument", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeExactlyOneArg is generated when the parser comes across a - // Dockerfile command that requires exactly one arg but got less/more. - ErrorCodeExactlyOneArg = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EXACTLYONEARG", - Message: "%s requires exactly one argument", - Description: "The specified command requires exactly one argument", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeAtLeastTwoArgs is generated when the parser comes across a - // Dockerfile command that requires at least two args but got less. 
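The definitions that follow all use the same registration pattern, so one hypothetical example is enough to show the shape; `errGroup` is the package's group name constant, defined elsewhere in this package:

```
package errors

import (
	"net/http"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode"
)

// ErrorCodeExample is a hypothetical registration illustrating the
// pattern used throughout this file.
var ErrorCodeExample = errcode.Register(errGroup, errcode.ErrorDescriptor{
	Value:          "EXAMPLE",
	Message:        "example failed: %s",
	Description:    "A hypothetical error used to illustrate Register",
	HTTPStatusCode: http.StatusInternalServerError,
})
```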
- ErrorCodeAtLeastTwoArgs = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "ATLEASTTWOARGS", - Message: "%s requires at least two arguments", - Description: "The specified command requires at least two arguments", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeTooManyArgs is generated when the parser comes across a - // Dockerfile command that has more args than it should - ErrorCodeTooManyArgs = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "TOOMANYARGS", - Message: "Bad input to %s, too many args", - Description: "The specified command was passed too many arguments", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeChainOnBuild is generated when the parser comes across a - // Dockerfile command that is trying to chain ONBUILD commands. - ErrorCodeChainOnBuild = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "CHAINONBUILD", - Message: "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed", - Description: "ONBUILD Dockerfile commands aren't allowed on ONBUILD commands", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeBadOnBuildCmd is generated when the parser comes across - // an ONBUILD Dockerfile command with an invalid trigger/command. - ErrorCodeBadOnBuildCmd = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BADONBUILDCMD", - Message: "%s isn't allowed as an ONBUILD trigger", - Description: "The specified ONBUILD command isn't allowed", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeMissingFrom is generated when the Dockerfile is missing - // a FROM command. - ErrorCodeMissingFrom = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MISSINGFROM", - Message: "Please provide a source image with `from` prior to run", - Description: "The Dockerfile is missing a FROM command", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNotOnWindows is generated when the specified Dockerfile - // command is not supported on Windows. - ErrorCodeNotOnWindows = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOTONWINDOWS", - Message: "%s is not supported on Windows", - Description: "The specified Dockerfile command is not supported on Windows", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeEmpty is generated when the specified Volume string - // is empty. - ErrorCodeVolumeEmpty = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMEEMPTY", - Message: "Volume specified can not be an empty string", - Description: "The specified volume can not be an empty string", - HTTPStatusCode: http.StatusInternalServerError, - }) -) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/daemon.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/daemon.go deleted file mode 100644 index 29d077a9a..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/daemon.go +++ /dev/null @@ -1,925 +0,0 @@ -package errors - -// This file contains all of the errors that can be generated from the -// docker/daemon component. - -import ( - "net/http" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode" -) - -var ( - // ErrorCodeNoSuchContainer is generated when we look for a container by - // name or ID and we can't find it. 
- ErrorCodeNoSuchContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOSUCHCONTAINER", - Message: "no such id: %s", - Description: "The specified container can not be found", - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeUnregisteredContainer is generated when we try to load - // a storage driver for an unregistered container - ErrorCodeUnregisteredContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "UNREGISTEREDCONTAINER", - Message: "Can't load storage driver for unregistered container %s", - Description: "An attempt was made to load the storage driver for a container that is not registered with the daemon", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeContainerBeingRemoved is generated when an attempt to start - // a container is made but it's in the process of being removed, or is dead. - ErrorCodeContainerBeingRemoved = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "CONTAINERBEINGREMOVED", - Message: "Container is marked for removal and cannot be started.", - Description: "An attempt was made to start a container that is in the process of being deleted", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeUnpauseContainer is generated when we attempt to stop a - // container but it's paused. - ErrorCodeUnpauseContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "UNPAUSECONTAINER", - Message: "Container %s is paused. Unpause the container before stopping", - Description: "The specified container is paused; before it can be stopped it must be unpaused", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeAlreadyPaused is generated when we attempt to pause a - // container when it's already paused. - ErrorCodeAlreadyPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "ALREADYPAUSED", - Message: "Container %s is already paused", - Description: "The specified container is already in the paused state", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNotPaused is generated when we attempt to unpause a - // container when it's not paused. - ErrorCodeNotPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOTPAUSED", - Message: "Container %s is not paused", - Description: "The specified container can not be unpaused because it is not in a paused state", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeImageUnregContainer is generated when we attempt to get the - // image of an unknown/unregistered container. - ErrorCodeImageUnregContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "IMAGEUNREGCONTAINER", - Message: "Can't get image of unregistered container", - Description: "An attempt to retrieve the image of a container was made but the container is not registered", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeEmptyID is generated when an ID is the empty string. - ErrorCodeEmptyID = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EMPTYID", - Message: "Invalid empty id", - Description: "An attempt was made to register a container but the container's ID can not be an empty string", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeLoggingFactory is generated when we could not load the - // log driver. 
- ErrorCodeLoggingFactory = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "LOGGINGFACTORY", - Message: "Failed to get logging factory: %v", - Description: "There was an error while trying to get the logging factory for the container", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeInitLogger is generated when we could not initialize - // the logging driver. - ErrorCodeInitLogger = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "INITLOGGER", - Message: "Failed to initialize logging driver: %v", - Description: "An error occurred while trying to initialize the logging driver", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNotRunning is generated when we need to verify that - // a container is running, but it's not. - ErrorCodeNotRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOTRUNNING", - Message: "Container %s is not running", - Description: "The specified action can not be taken due to the container not being in a running state", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeLinkNotRunning is generated when we try to link to a - // container that is not running. - ErrorCodeLinkNotRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "LINKNOTRUNNING", - Message: "Cannot link to a non running container: %s AS %s", - Description: "An attempt was made to link to a container but the container is not in a running state", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeDeviceInfo is generated when there is an error while trying - // to get info about a custom device. - ErrorCodeDeviceInfo = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "DEVICEINFO", - Message: "error gathering device information while adding custom device %q: %s", - Description: "There was an error while trying to retrieve the information about a custom device", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeEmptyEndpoint is generated when the endpoint for a port - // map is nil. - ErrorCodeEmptyEndpoint = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EMPTYENDPOINT", - Message: "invalid endpoint while building port map info", - Description: "The specified endpoint for the port mapping is empty", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeEmptyNetwork is generated when the networkSettings for a port - // map is nil. - ErrorCodeEmptyNetwork = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EMPTYNETWORK", - Message: "invalid networksettings while building port map info", - Description: "The specified network settings for the port mapping are empty", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeParsingPort is generated when there is an error parsing - // a "port" string. - ErrorCodeParsingPort = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "PARSINGPORT", - Message: "Error parsing Port value(%v):%v", - Description: "There was an error while trying to parse the specified 'port' value", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNoSandbox is generated when we can't find the specified - // sandbox (network) by ID. 
- ErrorCodeNoSandbox = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOSANDBOX", - Message: "error locating sandbox id %s: %v", - Description: "There was an error trying to locate the specified networking sandbox", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNetworkUpdate is generated when there is an error while - // trying to update a network/sandbox config. - ErrorCodeNetworkUpdate = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NETWORKUPDATE", - Message: "Update network failed: %v", - Description: "There was an error trying to update the configuration information of the specified network sandbox", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNetworkRefresh is generated when there is an error while - // trying to refresh a network/sandbox config. - ErrorCodeNetworkRefresh = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NETWORKREFRESH", - Message: "Update network failed: Failure in refresh sandbox %s: %v", - Description: "There was an error trying to refresh the configuration information of the specified network sandbox", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeHostPort is generated when there was an error while trying - // to parse a "host/port" string. - ErrorCodeHostPort = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "HOSTPORT", - Message: "Error parsing HostPort value(%s):%v", - Description: "There was an error trying to parse the specified 'HostPort' value", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNetworkConflict is generated when we try to publish a service - // in network mode. - ErrorCodeNetworkConflict = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NETWORKCONFLICT", - Message: "conflicting options: publishing a service and network mode", - Description: "It is not possible to publish a service when it is in network mode", - HTTPStatusCode: http.StatusConflict, - }) - - // ErrorCodeJoinInfo is generated when we fail to update a container's - // join info. - ErrorCodeJoinInfo = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "JOININFO", - Message: "Updating join info failed: %v", - Description: "There was an error during an attempt to update a container's join information", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeIPCRunning is generated when we try to join a container's - // IPC but it's not running. - ErrorCodeIPCRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "IPCRUNNING", - Message: "cannot join IPC of a non running container: %s", - Description: "An attempt was made to join the IPC of a container, but the container is not running", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNotADir is generated when we try to create a directory - // but the path isn't a dir. - ErrorCodeNotADir = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOTADIR", - Message: "Cannot mkdir: %s is not a directory", - Description: "An attempt was made to create a directory, but the location in which it is being created is not a directory", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeParseContainer is generated when the reference to another - // container doesn't include a ":". 
- ErrorCodeParseContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "PARSECONTAINER", - Message: "no container specified to join network", - Description: "The specified reference to a container is missing a ':' as a separator between 'container' and 'name'/'id'", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeJoinSelf is generated when we try to network to ourselves. - ErrorCodeJoinSelf = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "JOINSELF", - Message: "cannot join own network", - Description: "An attempt was made to have a container join its own network", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeJoinRunning is generated when we try to join the network of - // a non-running container. - ErrorCodeJoinRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "JOINRUNNING", - Message: "cannot join network of a non running container: %s", - Description: "An attempt was made to join the network of a container, but that container isn't running", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeModeNotContainer is generated when we try to network to - // another container but the mode isn't 'container'. - ErrorCodeModeNotContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MODENOTCONTAINER", - Message: "network mode not set to container", - Description: "An attempt was made to connect to a container's network but the mode wasn't set to 'container'", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeRemovingVolume is generated when we try to remove a mount - // point (volume) but fail. - ErrorCodeRemovingVolume = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "REMOVINGVOLUME", - Message: "Error removing volumes:\n%v", - Description: "There was an error while trying to remove the mount point (volume) of a container", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeInvalidNetworkMode is generated when an invalid network - // mode value is specified. - ErrorCodeInvalidNetworkMode = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "INVALIDNETWORKMODE", - Message: "invalid network mode: %s", - Description: "The specified networking mode is not valid", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeGetGraph is generated when there was an error while - // trying to find a graph/image. - ErrorCodeGetGraph = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "GETGRAPH", - Message: "Failed to graph.Get on ImageID %s - %s", - Description: "There was an error trying to retrieve the image for the specified image ID", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeGetLayer is generated when there was an error while - // trying to retrieve a particular layer of an image. - ErrorCodeGetLayer = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "GETLAYER", - Message: "Failed to get layer path from graphdriver %s for ImageID %s - %s", - Description: "There was an error trying to retrieve the layer of the specified image", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodePutLayer is generated when there was an error while - // trying to 'put' a particular layer of an image. 
- ErrorCodePutLayer = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "PUTLAYER", - Message: "Failed to put layer path from graphdriver %s for ImageID %s - %s", - Description: "There was an error trying to store a layer for the specified image", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeGetLayerMetadata is generated when there was an error while - // trying to retrieve the metadata of a layer of an image. - ErrorCodeGetLayerMetadata = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "GETLAYERMETADATA", - Message: "Failed to get layer metadata - %s", - Description: "There was an error trying to retrieve the metadata of a layer for the specified image", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeEmptyConfig is generated when the input config data - // is empty. - ErrorCodeEmptyConfig = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EMPTYCONFIG", - Message: "Config cannot be empty in order to create a container", - Description: "While trying to create a container, the specified configuration information was empty", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNoSuchImageHash is generated when we can't find the - // specified image by its hash - ErrorCodeNoSuchImageHash = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOSUCHIMAGEHASH", - Message: "No such image: %s", - Description: "An attempt was made to find an image by its hash, but the lookup failed", - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeNoSuchImageTag is generated when we can't find the - // specified image by its name/tag. - ErrorCodeNoSuchImageTag = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOSUCHIMAGETAG", - Message: "No such image: %s:%s", - Description: "An attempt was made to find an image by its name/tag, but the lookup failed", - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeMountOverFile is generated when we try to mount a volume - // over an existing file (but not a dir). - ErrorCodeMountOverFile = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MOUNTOVERFILE", - Message: "cannot mount volume over existing file, file exists %s", - Description: "An attempt was made to mount a volume at the same location as a pre-existing file", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeMountSetup is generated when we can't define a mount point - // due to the source and destination being undefined. - ErrorCodeMountSetup = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MOUNTSETUP", - Message: "Unable to setup mount point, neither source nor volume defined", - Description: "An attempt was made to setup a mount point, but the source and destination are undefined", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeInvalidMode is generated when the mode of a volume/bind - // mount is invalid. - ErrorCodeVolumeInvalidMode = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMEINVALIDMODE", - Message: "invalid mode: %s", - Description: "An invalid 'mode' was specified", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeInvalid is generated when the format of the - // volume specification isn't valid. 
- ErrorCodeVolumeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMEINVALID", - Message: "Invalid volume specification: %s", - Description: "An invalid 'volume' was specified in the mount request", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeAbs is generated when the path to a volume isn't absolute. - ErrorCodeVolumeAbs = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMEABS", - Message: "Invalid volume destination path: %s mount path must be absolute.", - Description: "An invalid 'destination' path was specified in the mount request; it must be an absolute path", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeName is generated when the name of a named volume isn't valid. - ErrorCodeVolumeName = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUME_NAME_INVALID", - Message: "%s includes invalid characters for a local volume name, only %s are allowed", - Description: "The name of the volume is invalid", - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeVolumeSlash is generated when the destination path to a volume is / - ErrorCodeVolumeSlash = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMESLASH", - Message: "Invalid specification: destination can't be '/' in '%s'", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeDestIsC is generated when the destination is c: (Windows specific) - ErrorCodeVolumeDestIsC = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMEDESTISC", - Message: "Destination drive letter in '%s' cannot be c:", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeDestIsCRoot is generated when the destination path is c:\ (Windows specific) - ErrorCodeVolumeDestIsCRoot = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMEDESTISCROOT", - Message: `Destination path in '%s' cannot be c:\`, - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeSourceNotFound is generated when the source directory could not be found (Windows specific) - ErrorCodeVolumeSourceNotFound = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMESOURCENOTFOUND", - Message: "Source directory '%s' could not be found: %v", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeSourceNotDirectory is generated when the source is not a directory (Windows specific) - ErrorCodeVolumeSourceNotDirectory = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMESOURCENOTDIRECTORY", - Message: "Source '%s' is not a directory", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeFromBlank is generated when the path to a volume is blank. - ErrorCodeVolumeFromBlank = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMEFROMBLANK", - Message: "malformed volumes-from specification: %s", - Description: "An invalid 'destination' path was specified in the mount request; it must not be blank", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeDup is generated when we try to mount two volumes - // to the same path.
- ErrorCodeVolumeDup = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMEDUP", - Message: "Duplicate bind mount %s", - Description: "An attempt was made to mount a volume but the specified destination location is already used in a previous mount", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeNoSourceForMount is generated when no source directory - // for a volume mount was found. (Windows specific) - ErrorCodeVolumeNoSourceForMount = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMENOSOURCEFORMOUNT", - Message: "No source for mount name %q driver %q destination %s", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeVolumeNameReservedWord is generated when the name in a volume - // uses a reserved word for filenames. (Windows specific) - ErrorCodeVolumeNameReservedWord = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUMENAMERESERVEDWORD", - Message: "Volume name %q cannot be a reserved word for Windows filenames", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeCantUnpause is generated when there's an error while trying - // to unpause a container. - ErrorCodeCantUnpause = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "CANTUNPAUSE", - Message: "Cannot unpause container %s: %s", - Description: "An error occurred while trying to unpause the specified container", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodePSError is generated when trying to run 'ps'. - ErrorCodePSError = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "PSError", - Message: "Error running ps: %s", - Description: "There was an error trying to run the 'ps' command in the specified container", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNoPID is generated when looking for the PID field in the - // ps output. - ErrorCodeNoPID = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOPID", - Message: "Couldn't find PID field in ps output", - Description: "There was no 'PID' field in the output of the 'ps' command that was executed", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeBadPID is generated when we can't convert a PID to an int. - ErrorCodeBadPID = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BADPID", - Message: "Unexpected pid '%s': %s", - Description: "While trying to parse the output of the 'ps' command, the 'PID' field was not an integer", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNoTop is generated when we try to run 'top' but can't - // because we're on windows. - ErrorCodeNoTop = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOTOP", - Message: "Top is not supported on Windows", - Description: "The 'top' command is not supported on Windows", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeStopped is generated when we try to stop a container - // that is already stopped. - ErrorCodeStopped = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "STOPPED", - Message: "Container already stopped", - Description: "An attempt was made to stop a container, but the container is already stopped", - HTTPStatusCode: http.StatusNotModified, - }) - - // ErrorCodeCantStop is generated when we try to stop a container - // but failed for some reason. 
- ErrorCodeCantStop = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "CANTSTOP", - Message: "Cannot stop container %s: %s\n", - Description: "An error occurred while trying to stop the specified container", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeBadCPUFields is generated when the number of CPU fields is - // less than 8. - ErrorCodeBadCPUFields = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BADCPUFIELDS", - Message: "invalid number of cpu fields", - Description: "While reading the '/proc/stat' file, the number of 'cpu' fields is less than 8", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeBadCPUInt is generated when the CPU field can't be parsed as an int. - ErrorCodeBadCPUInt = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BADCPUINT", - Message: "Unable to convert value %s to int: %s", - Description: "While reading the '/proc/stat' file, the 'CPU' field could not be parsed as an integer", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeBadStatFormat is generated when the output of the stat info - // isn't parseable. - ErrorCodeBadStatFormat = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BADSTATFORMAT", - Message: "invalid stat format", - Description: "There was an error trying to parse the '/proc/stat' file", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeTimedOut is generated when a timer expires. - ErrorCodeTimedOut = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "TIMEDOUT", - Message: "Timed out: %v", - Description: "A timer expired", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeAlreadyRemoving is generated when we try to remove a - // container that is already being removed. - ErrorCodeAlreadyRemoving = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "ALREADYREMOVING", - Message: "Status is already RemovalInProgress", - Description: "An attempt to remove a container was made, but the container is already in the process of being removed", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeStartPaused is generated when we try to start a paused container. - ErrorCodeStartPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "STARTPAUSED", - Message: "Cannot start a paused container, try unpause instead.", - Description: "An attempt to start a container was made, but the container is paused. Unpause it first", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeAlreadyStarted is generated when we try to start a container - // that is already running. - ErrorCodeAlreadyStarted = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "ALREADYSTARTED", - Message: "Container already started", - Description: "An attempt to start a container was made, but the container is already started", - HTTPStatusCode: http.StatusNotModified, - }) - - // ErrorCodeHostConfigStart is generated when a HostConfig is passed - // into the start command. - ErrorCodeHostConfigStart = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "HOSTCONFIGSTART", - Message: "Supplying a hostconfig on start is not supported. It should be supplied on create", - Description: "The 'start' command does not accept 'HostConfig' data, try using the 'create' command instead", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeCantStart is generated when an error occurred while - // trying to start a container.
- ErrorCodeCantStart = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "CANTSTART", - Message: "Cannot start container %s: %s", - Description: "There was an error while trying to start a container", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeCantRestart is generated when an error occurred while - // trying to restart a container. - ErrorCodeCantRestart = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "CANTRESTART", - Message: "Cannot restart container %s: %s", - Description: "There was an error while trying to restart a container", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeEmptyRename is generated when one of the names on a - // rename is empty. - ErrorCodeEmptyRename = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EMPTYRENAME", - Message: "Neither old nor new names may be empty", - Description: "An attempt was made to rename a container but either the old or new names were blank", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeRenameTaken is generated when we try to rename but the - // new name isn't available. - ErrorCodeRenameTaken = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RENAMETAKEN", - Message: "Error when allocating new name: %s", - Description: "The new name specified on the 'rename' command is already being used", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeRenameDelete is generated when we try to rename but - // failed trying to delete the old container. - ErrorCodeRenameDelete = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RENAMEDELETE", - Message: "Failed to delete container %q: %v", - Description: "There was an error trying to delete the specified container", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodePauseError is generated when we try to pause a container - // but failed. - ErrorCodePauseError = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "PAUSEERROR", - Message: "Cannot pause container %s: %s", - Description: "There was an error trying to pause the specified container", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNeedStream is generated when we try to stream a container's - // logs but no output stream was specified. - ErrorCodeNeedStream = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NEEDSTREAM", - Message: "You must choose at least one stream", - Description: "While trying to stream a container's logs, no output stream was specified", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeDanglingOne is generated when we try to specify more than one - // 'dangling' specifier. - ErrorCodeDanglingOne = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "DANLGINGONE", - Message: "Conflict: cannot use more than 1 value for `dangling` filter", - Description: "The specified 'dangling' filter may not have more than one value", - HTTPStatusCode: http.StatusConflict, - }) - - // ErrorCodeImgDelUsed is generated when we try to delete an image - // but it is being used. 
- ErrorCodeImgDelUsed = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "IMGDELUSED", - Message: "conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s", - Description: "An attempt was made to delete an image but it is currently being used", - HTTPStatusCode: http.StatusConflict, - }) - - // ErrorCodeImgNoParent is generated when we try to find an image's - // parent but it's not in the graph. - ErrorCodeImgNoParent = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "IMGNOPARENT", - Message: "unable to get parent image: %v", - Description: "There was an error trying to find an image's parent; it was not in the graph", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeExportFailed is generated when an export fails. - ErrorCodeExportFailed = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EXPORTFAILED", - Message: "%s: %s", - Description: "There was an error during an export operation", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeExecResize is generated when we try to resize an exec - // but it's not running. - ErrorCodeExecResize = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EXECRESIZE", - Message: "Exec %s is not running, so it can not be resized.", - Description: "An attempt was made to resize an 'exec', but the 'exec' is not running", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeContainerNotRunning is generated when we try to get the info - // on an exec but the container is not running. - ErrorCodeContainerNotRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "CONTAINERNOTRUNNING", - Message: "Container %s is not running: %s", - Description: "An attempt was made to retrieve the information about an 'exec' but the container is not running", - HTTPStatusCode: http.StatusConflict, - }) - - // ErrorCodeNoExecID is generated when we try to get the info - // on an exec but it can't be found. - ErrorCodeNoExecID = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOEXECID", - Message: "No such exec instance '%s' found in daemon", - Description: "The specified 'exec' instance could not be found", - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeExecPaused is generated when we try to start an exec - // but the container is paused. - ErrorCodeExecPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EXECPAUSED", - Message: "Container %s is paused, unpause the container before exec", - Description: "An attempt to start an 'exec' was made, but the owning container is paused", - HTTPStatusCode: http.StatusConflict, - }) - - // ErrorCodeExecRunning is generated when we try to start an exec - // but it's already running. - ErrorCodeExecRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EXECRUNNING", - Message: "Error: Exec command %s is already running", - Description: "An attempt to start an 'exec' was made, but 'exec' is already running", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeExecCantRun is generated when we try to start an exec - // but it failed for some reason.
- ErrorCodeExecCantRun = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EXECCANTRUN", - Message: "Cannot run exec command %s in container %s: %s", - Description: "An attempt to start an 'exec' was made, but an error occurred", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeExecAttach is generated when we try to attach to an exec - // but failed. - ErrorCodeExecAttach = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EXECATTACH", - Message: "attach failed with error: %s", - Description: "There was an error while trying to attach to an 'exec'", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeExecContainerStopped is generated when we try to start - // an exec but then the container stopped. - ErrorCodeExecContainerStopped = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "EXECCONTAINERSTOPPED", - Message: "container stopped while running exec", - Description: "An attempt was made to start an 'exec' but the owning container is in the 'stopped' state", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeDefaultName is generated when we try to delete the - // default name of a container. - ErrorCodeDefaultName = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "DEFAULTNAME", - Message: "Conflict, cannot remove the default name of the container", - Description: "An attempt to delete the default name of a container was made, but that is not allowed", - HTTPStatusCode: http.StatusConflict, - }) - - // ErrorCodeNoParent is generated when we try to delete a container - // but we can't find its parent image. - ErrorCodeNoParent = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOPARENT", - Message: "Cannot get parent %s for name %s", - Description: "An attempt was made to delete a container but its parent image could not be found", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeCantDestroy is generated when we try to delete a container - // but failed for some reason. - ErrorCodeCantDestroy = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "CANTDESTROY", - Message: "Cannot destroy container %s: %v", - Description: "An attempt was made to delete a container but it failed", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeRmRunning is generated when we try to delete a container - // but it's still running. - ErrorCodeRmRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RMRUNNING", - Message: "Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f", - Description: "An attempt was made to delete a container but the container is still running; try to either stop it first or use '-f'", - HTTPStatusCode: http.StatusConflict, - }) - - // ErrorCodeRmFailed is generated when we try to delete a container - // but it failed for some reason. - ErrorCodeRmFailed = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RMFAILED", - Message: "Could not kill running container, cannot remove - %v", - Description: "An error occurred while trying to delete a running container", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeRmNotFound is generated when we try to delete a container - // but couldn't find it.
- ErrorCodeRmNotFound = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RMNOTFOUND", - Message: "Could not kill running container, cannot remove - %v", - Description: "An attempt to delete a container was made but the container could not be found", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeRmState is generated when we try to delete a container - // but couldn't set its state to RemovalInProgress. - ErrorCodeRmState = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RMSTATE", - Message: "Failed to set container state to RemovalInProgress: %s", - Description: "An attempt to delete a container was made, but there was an error trying to set its state to 'RemovalInProgress'", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeRmDriverFS is generated when we try to delete a container - // but the driver failed to delete its filesystem. - ErrorCodeRmDriverFS = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RMDRIVERFS", - Message: "Driver %s failed to remove root filesystem %s: %s", - Description: "While trying to delete a container, the driver failed to remove the root filesystem", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeRmInit is generated when we try to delete a container - // but failed deleting its init filesystem. - ErrorCodeRmInit = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RMINIT", - Message: "Driver %s failed to remove init filesystem %s: %s", - Description: "While trying to delete a container, the driver failed to remove the init filesystem", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeRmFS is generated when we try to delete a container - // but failed deleting its filesystem. - ErrorCodeRmFS = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RMFS", - Message: "Unable to remove filesystem for %v: %v", - Description: "While trying to delete a container, the driver failed to remove the filesystem", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeRmExecDriver is generated when we try to delete a container - // but failed deleting its exec driver data. - ErrorCodeRmExecDriver = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RMEXECDRIVER", - Message: "Unable to remove execdriver data for %s: %s", - Description: "While trying to delete a container, there was an error trying to remove the exec driver data", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeRmVolumeInUse is generated when we try to delete a container - // but failed deleting a volume because it's being used. - ErrorCodeRmVolumeInUse = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RMVOLUMEINUSE", - Message: "Conflict: %v", - Description: "While trying to delete a container, one of its volumes is still being used", - HTTPStatusCode: http.StatusConflict, - }) - - // ErrorCodeRmVolume is generated when we try to delete a container - // but failed deleting a volume. - ErrorCodeRmVolume = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RMVOLUME", - Message: "Error while removing volume %s: %v", - Description: "While trying to delete a container, there was an error trying to delete one of its volumes", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeInvalidCpusetCpus is generated when user provided cpuset CPUs - // are invalid.
- ErrorCodeInvalidCpusetCpus = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "INVALIDCPUSETCPUS", - Message: "Invalid value %s for cpuset cpus.", - Description: "While verifying the container's 'HostConfig', CpusetCpus value was in an incorrect format", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeInvalidCpusetMems is generated when user provided cpuset mems - // are invalid. - ErrorCodeInvalidCpusetMems = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "INVALIDCPUSETMEMS", - Message: "Invalid value %s for cpuset mems.", - Description: "While verifying the container's 'HostConfig', CpusetMems value was in an incorrect format", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNotAvailableCpusetCpus is generated when user provided cpuset - // CPUs aren't available in the container's cgroup. - ErrorCodeNotAvailableCpusetCpus = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOTAVAILABLECPUSETCPUS", - Message: "Requested CPUs are not available - requested %s, available: %s.", - Description: "While verifying the container's 'HostConfig', cpuset CPUs provided aren't available in the container's cgroup available set", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeNotAvailableCpusetMems is generated when user provided cpuset - // memory nodes aren't available in the container's cgroup. - ErrorCodeNotAvailableCpusetMems = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NOTAVAILABLECPUSETMEMS", - Message: "Requested memory nodes are not available - requested %s, available: %s.", - Description: "While verifying the container's 'HostConfig', cpuset memory nodes provided aren't available in the container's cgroup available set", - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorVolumeNameTaken is generated when an error occurs while - // trying to create a volume that already exists with a different driver. - ErrorVolumeNameTaken = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "VOLUME_NAME_TAKEN", - Message: "A volume name %s already exists with the %s driver. Choose a different volume name.", - Description: "An attempt was made to create a volume with one driver, but the volume already exists with a different driver", - HTTPStatusCode: http.StatusInternalServerError, - }) -) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/error.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/error.go deleted file mode 100644 index 37222d443..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/error.go +++ /dev/null @@ -1,6 +0,0 @@ -package errors - -// This file contains all of the errors that can be generated from the -// docker engine but are not tied to any specific top-level component. - -const errGroup = "engine" diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/image.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/image.go deleted file mode 100644 index c104b1bb7..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/image.go +++ /dev/null @@ -1,20 +0,0 @@ -package errors - -// This file contains all of the errors that can be generated from the -// docker/image component.
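For orientation, the registrations removed above all funnel through the vendored errcode package. A rough, hedged sketch of how a caller turns one of these codes into a concrete error (assuming the vendored copy keeps upstream docker/distribution's WithArgs helper, and that the vendored import path resolves):

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors"
)

func main() {
	// WithArgs fills the %s placeholders of the descriptor's Message; the
	// returned error still carries the descriptor's HTTPStatusCode, so an
	// API handler can map it straight to a response status. The exact
	// rendering depends on errcode.Error's Error() method.
	err := errors.ErrorCodeNoSuchImageTag.WithArgs("ubuntu", "latest")
	fmt.Println(err)
}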
- -import ( - "net/http" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode" -) - -var ( - // ErrorCodeInvalidImageID is generated when the image id specified is incorrectly formatted. - ErrorCodeInvalidImageID = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "INVALIDIMAGEID", - Message: "image ID '%s' is invalid ", - Description: "The specified image id is incorrectly formatted", - HTTPStatusCode: http.StatusInternalServerError, - }) -) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/server.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/server.go deleted file mode 100644 index 580b47f74..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/server.go +++ /dev/null @@ -1,36 +0,0 @@ -package errors - -import ( - "net/http" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode" -) - -var ( - // ErrorCodeNewerClientVersion is generated when a request from a client - // specifies a higher version than the server supports. - ErrorCodeNewerClientVersion = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NEWERCLIENTVERSION", - Message: "client is newer than server (client API version: %s, server API version: %s)", - Description: "The client version is higher than the server version", - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeOldClientVersion is generated when a request from a client - // specifies a version lower than the minimum version supported by the server. - ErrorCodeOldClientVersion = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "OLDCLIENTVERSION", - Message: "client version %s is too old. Minimum supported API version is %s, please upgrade your client to a newer version", - Description: "The client version is too old for the server", - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorNetworkControllerNotEnabled is generated when the networking stack is not enabled - // for certain platforms, like Windows. - ErrorNetworkControllerNotEnabled = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NETWORK_CONTROLLER_NOT_ENABLED", - Message: "the network controller is not enabled for this platform", - Description: "Docker's networking stack is disabled for this platform", - HTTPStatusCode: http.StatusNotFound, - }) -) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go deleted file mode 100644 index ba8b4f201..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go +++ /dev/null @@ -1,67 +0,0 @@ -package opts - -import ( - "bufio" - "fmt" - "os" - "strings" -) - -// ParseEnvFile reads a file with environment variables enumerated by lines - // - // ``Environment variable names used by the utilities in the Shell and - // Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase - // letters, digits, and the '_' (underscore) from the characters defined in - // Portable Character Set and do not begin with a digit.
*But*, other - // characters may be permitted by an implementation; applications shall - // tolerate the presence of such names.'' - // -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html - // - // As of #16585, it's up to the application inside docker to validate - // environment variables or not; that's why we just strip leading whitespace and - // nothing more. -func ParseEnvFile(filename string) ([]string, error) { - fh, err := os.Open(filename) - if err != nil { - return []string{}, err - } - defer fh.Close() - - lines := []string{} - scanner := bufio.NewScanner(fh) - for scanner.Scan() { - // trim the line from all leading whitespace first - line := strings.TrimLeft(scanner.Text(), whiteSpaces) - // line is not empty, and not starting with '#' - if len(line) > 0 && !strings.HasPrefix(line, "#") { - data := strings.SplitN(line, "=", 2) - - // trim the front of a variable, but nothing else - variable := strings.TrimLeft(data[0], whiteSpaces) - if strings.ContainsAny(variable, whiteSpaces) { - return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)} - } - - if len(data) > 1 { - - // pass the value through, no trimming - lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) - } else { - // if only a pass-through variable is given, clean it up. - lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line))) - } - } - } - return lines, scanner.Err() -} - -var whiteSpaces = " \t" - -// ErrBadEnvVariable is a typed error for a bad environment variable -type ErrBadEnvVariable struct { - msg string -} - -func (e ErrBadEnvVariable) Error() string { - return fmt.Sprintf("poorly formatted environment: %s", e.msg) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go deleted file mode 100644 index 611407a9d..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows - -package opts - -import "fmt" - -// DefaultHost constant defines the default host string used by docker on hosts other than Windows -var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go deleted file mode 100644 index ec52e9a70..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build windows - -package opts - -// DefaultHost constant defines the default host string used by docker on Windows -var DefaultHost = DefaultTCPHost diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go deleted file mode 100644 index d787b56ca..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go +++ /dev/null @@ -1,42 +0,0 @@ -package opts - -import ( - "fmt" - "net" -) - -// IPOpt holds an IP. It is used to store values from CLI flags.
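To make ParseEnvFile's contract concrete, here is a hedged usage sketch; the path and file contents are hypothetical, and the vendored import path is assumed to be resolvable:

package main

import (
	"fmt"
	"log"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
)

func main() {
	// Given a hypothetical /tmp/app.env containing:
	//   # comments and blank lines are skipped
	//   FOO=bar
	//   HOME
	// "FOO=bar" passes through with its value untouched, while the bare
	// "HOME" is completed from this process's environment via os.Getenv.
	envs, err := opts.ParseEnvFile("/tmp/app.env")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(envs) // e.g. [FOO=bar HOME=/home/user]
}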
-type IPOpt struct { - *net.IP -} - -// NewIPOpt creates a new IPOpt from a reference net.IP and a - // string representation of an IP. If the string is not a valid - // IP it will fall back to the specified reference. -func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { - o := &IPOpt{ - IP: ref, - } - o.Set(defaultVal) - return o -} - -// Set sets an IPv4 or IPv6 address from a given string. If the given - // string is not parsable as an IP address it returns an error. -func (o *IPOpt) Set(val string) error { - ip := net.ParseIP(val) - if ip == nil { - return fmt.Errorf("%s is not an ip address", val) - } - *o.IP = ip - return nil -} - -// String returns the IP address stored in the IPOpt. If stored IP is a - // nil pointer, it returns an empty string. -func (o *IPOpt) String() string { - if *o.IP == nil { - return "" - } - return o.IP.String() -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go deleted file mode 100644 index df85a09e3..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go +++ /dev/null @@ -1,360 +0,0 @@ -package opts - -import ( - "fmt" - "net" - "os" - "path" - "regexp" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers" -) - -var ( - alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) - domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) - // DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080 - DefaultHTTPHost = "localhost" - - // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp:// - // TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter - // is not supplied. A better longer term solution would be to use a named - // pipe as the default on the Windows daemon. - // These are the IANA registered port numbers for use with Docker - // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker - DefaultHTTPPort = 2375 // Default HTTP Port - // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled - DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port - // DefaultUnixSocket Path for the unix socket. - // Docker daemon by default always listens on the default unix socket - DefaultUnixSocket = "/var/run/docker.sock" - // DefaultTCPHost constant defines the default host string used by docker on Windows - DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) - // DefaultTLSHost constant defines the default host string used by docker for TLS sockets - DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) -) - -// ListOpts holds a list of values and a validation function. -type ListOpts struct { - values *[]string - validator ValidatorFctType -} - -// NewListOpts creates a new ListOpts with the specified validator. -func NewListOpts(validator ValidatorFctType) ListOpts { - var values []string - return *NewListOptsRef(&values, validator) -} - -// NewListOptsRef creates a new ListOpts with the specified values and validator.
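Since IPOpt implements both Set and String, it satisfies the standard library's flag.Value interface. A minimal sketch of wiring it to a CLI flag (hypothetical flag name; the vendored import path is assumed to resolve):

package main

import (
	"flag"
	"fmt"
	"net"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
)

func main() {
	// The net.IP passed by reference is updated in place when the flag parses.
	ip := net.ParseIP("127.0.0.1")
	flag.Var(opts.NewIPOpt(&ip, "127.0.0.1"), "ip", "IP address to bind to")
	flag.Parse()
	fmt.Println("binding to", ip)
}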
-func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { - return &ListOpts{ - values: values, - validator: validator, - } -} - -func (opts *ListOpts) String() string { - return fmt.Sprintf("%v", []string((*opts.values))) -} - -// Set validates the input value if needed and adds it to the - // internal slice. -func (opts *ListOpts) Set(value string) error { - if opts.validator != nil { - v, err := opts.validator(value) - if err != nil { - return err - } - value = v - } - (*opts.values) = append((*opts.values), value) - return nil -} - -// Delete removes the specified element from the slice. -func (opts *ListOpts) Delete(key string) { - for i, k := range *opts.values { - if k == key { - (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...) - return - } - } -} - -// GetMap returns the content of values in a map in order to avoid - // duplicates. -func (opts *ListOpts) GetMap() map[string]struct{} { - ret := make(map[string]struct{}) - for _, k := range *opts.values { - ret[k] = struct{}{} - } - return ret -} - -// GetAll returns the values of the slice. -func (opts *ListOpts) GetAll() []string { - return (*opts.values) -} - -// Get checks the existence of the specified key. -func (opts *ListOpts) Get(key string) bool { - for _, k := range *opts.values { - if k == key { - return true - } - } - return false -} - -// Len returns the number of elements in the slice. -func (opts *ListOpts) Len() int { - return len((*opts.values)) -} - -// MapOpts holds a map of values and a validation function. -type MapOpts struct { - values map[string]string - validator ValidatorFctType -} - -// Set validates the input value if needed and adds it to the - // internal map, by splitting on '='. -func (opts *MapOpts) Set(value string) error { - if opts.validator != nil { - v, err := opts.validator(value) - if err != nil { - return err - } - value = v - } - vals := strings.SplitN(value, "=", 2) - if len(vals) == 1 { - (opts.values)[vals[0]] = "" - } else { - (opts.values)[vals[0]] = vals[1] - } - return nil -} - -// GetAll returns the values of MapOpts as a map. -func (opts *MapOpts) GetAll() map[string]string { - return opts.values -} - -func (opts *MapOpts) String() string { - return fmt.Sprintf("%v", map[string]string((opts.values))) -} - -// NewMapOpts creates a new MapOpts with the specified map of values and a validator. -func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { - if values == nil { - values = make(map[string]string) - } - return &MapOpts{ - values: values, - validator: validator, - } -} - -// ValidatorFctType defines a validator function that returns a validated string and/or an error. -type ValidatorFctType func(val string) (string, error) - -// ValidatorFctListType defines a validator function that returns a validated list of strings and/or an error -type ValidatorFctListType func(val string) ([]string, error) - -// ValidateAttach validates that the specified string is a valid attach option. -func ValidateAttach(val string) (string, error) { - s := strings.ToLower(val) - for _, str := range []string{"stdin", "stdout", "stderr"} { - if s == str { - return s, nil - } - } - return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR") -} - -// ValidateLink validates that the specified string has a valid link format (containerName:alias).
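A hedged sketch of ListOpts with a validator, showing that Set rejects bad values and stores the validator's (possibly normalized) output; the vendored import path is assumed to resolve:

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
)

func main() {
	attach := opts.NewListOpts(opts.ValidateAttach)
	if err := attach.Set("STDOUT"); err != nil {
		fmt.Println("unexpected:", err)
	}
	// ValidateAttach lowercases the accepted value before it is stored.
	fmt.Println(attach.GetAll()) // [stdout]
	// An invalid stream is rejected and never appended.
	fmt.Println(attach.Set("bogus")) // valid streams are STDIN, STDOUT and STDERR
}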
-func ValidateLink(val string) (string, error) { - if _, _, err := parsers.ParseLink(val); err != nil { - return val, err - } - return val, nil -} - -// ValidDeviceMode checks if the mode for a device is valid. - // Valid mode is a composition of r (read), w (write), and m (mknod). -func ValidDeviceMode(mode string) bool { - var legalDeviceMode = map[rune]bool{ - 'r': true, - 'w': true, - 'm': true, - } - if mode == "" { - return false - } - for _, c := range mode { - if !legalDeviceMode[c] { - return false - } - legalDeviceMode[c] = false - } - return true -} - -// ValidateDevice validates a path for devices - // It will make sure 'val' is in the form: - // [host-dir:]container-path[:mode] - // It also validates the device mode. -func ValidateDevice(val string) (string, error) { - return validatePath(val, ValidDeviceMode) -} - -func validatePath(val string, validator func(string) bool) (string, error) { - var containerPath string - var mode string - - if strings.Count(val, ":") > 2 { - return val, fmt.Errorf("bad format for path: %s", val) - } - - split := strings.SplitN(val, ":", 3) - if split[0] == "" { - return val, fmt.Errorf("bad format for path: %s", val) - } - switch len(split) { - case 1: - containerPath = split[0] - val = path.Clean(containerPath) - case 2: - if isValid := validator(split[1]); isValid { - containerPath = split[0] - mode = split[1] - val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode) - } else { - containerPath = split[1] - val = fmt.Sprintf("%s:%s", split[0], path.Clean(containerPath)) - } - case 3: - containerPath = split[1] - mode = split[2] - if isValid := validator(split[2]); !isValid { - return val, fmt.Errorf("bad mode specified: %s", mode) - } - val = fmt.Sprintf("%s:%s:%s", split[0], containerPath, mode) - } - - if !path.IsAbs(containerPath) { - return val, fmt.Errorf("%s is not an absolute path", containerPath) - } - return val, nil -} - -// ValidateEnv validates an environment variable and returns it. - // If no value is specified, it returns the current value using os.Getenv. - // - // As in ParseEnvFile and related to #16585, environment variable names - // are not validated whatsoever; it's up to the application inside docker - // to validate them or not. -func ValidateEnv(val string) (string, error) { - arr := strings.Split(val, "=") - if len(arr) > 1 { - return val, nil - } - if !doesEnvExist(val) { - return val, nil - } - return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil -} - -// ValidateIPAddress validates an IP address. -func ValidateIPAddress(val string) (string, error) { - var ip = net.ParseIP(strings.TrimSpace(val)) - if ip != nil { - return ip.String(), nil - } - return "", fmt.Errorf("%s is not an ip address", val) -} - -// ValidateMACAddress validates a MAC address. -func ValidateMACAddress(val string) (string, error) { - _, err := net.ParseMAC(strings.TrimSpace(val)) - if err != nil { - return "", err - } - return val, nil -} - -// ValidateDNSSearch validates domain for resolvconf search configuration. - // A zero length domain is represented by a dot (.). -func ValidateDNSSearch(val string) (string, error) { - if val = strings.Trim(val, " "); val == "."
{ - return val, nil - } - return validateDomain(val) -} - -func validateDomain(val string) (string, error) { - if alphaRegexp.FindString(val) == "" { - return "", fmt.Errorf("%s is not a valid domain", val) - } - ns := domainRegexp.FindSubmatch([]byte(val)) - if len(ns) > 0 && len(ns[1]) < 255 { - return string(ns[1]), nil - } - return "", fmt.Errorf("%s is not a valid domain", val) -} - -// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. - // ExtraHosts are in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6). -func ValidateExtraHost(val string) (string, error) { - // allow for IPv6 addresses in extra hosts by only splitting on first ":" - arr := strings.SplitN(val, ":", 2) - if len(arr) != 2 || len(arr[0]) == 0 { - return "", fmt.Errorf("bad format for add-host: %q", val) - } - if _, err := ValidateIPAddress(arr[1]); err != nil { - return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) - } - return val, nil -} - -// ValidateLabel validates that the specified string is a valid label, and returns it. - // Labels are in the form of key=value. -func ValidateLabel(val string) (string, error) { - if strings.Count(val, "=") < 1 { - return "", fmt.Errorf("bad attribute format: %s", val) - } - return val, nil -} - -// ValidateHost validates that the specified string is a valid host and returns it. -func ValidateHost(val string) (string, error) { - _, err := parsers.ParseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, "", val) - if err != nil { - return val, err - } - // Note: unlike most flag validators, we don't return the mutated value here - // we need to know what the user entered later (using ParseHost) to adjust for tls - return val, nil -} - -// ParseHost parses and sets defaults for a daemon host string -func ParseHost(defaultHost, val string) (string, error) { - host, err := parsers.ParseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, defaultHost, val) - if err != nil { - return val, err - } - return host, nil -} - -func doesEnvExist(name string) bool { - for _, entry := range os.Environ() { - parts := strings.SplitN(entry, "=", 2) - if parts[0] == name { - return true - } - } - return false -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.go deleted file mode 100644 index 7cd480791..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.go +++ /dev/null @@ -1,52 +0,0 @@ -package opts - -import ( - "fmt" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit" -) - -// UlimitOpt defines a map of Ulimits -type UlimitOpt struct { - values *map[string]*ulimit.Ulimit -} - -// NewUlimitOpt creates a new UlimitOpt -func NewUlimitOpt(ref *map[string]*ulimit.Ulimit) *UlimitOpt { - if ref == nil { - ref = &map[string]*ulimit.Ulimit{} - } - return &UlimitOpt{ref} -} - -// Set validates a Ulimit and sets its name as a key in UlimitOpt -func (o *UlimitOpt) Set(val string) error { - l, err := ulimit.Parse(val) - if err != nil { - return err - } - - (*o.values)[l.Name] = l - - return nil -} - -// String returns Ulimit values as a string.
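A few hedged examples of what these validators accept and reject (vendored import path assumed; the comments show the expected outcomes):

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
)

func main() {
	// ValidateExtraHost splits only on the first ':' so IPv6 values survive.
	fmt.Println(opts.ValidateExtraHost("db:10.0.0.2")) // db:10.0.0.2 <nil>
	fmt.Println(opts.ValidateExtraHost("db:::1"))      // db:::1 <nil> ("::1" parses as IPv6)
	fmt.Println(opts.ValidateExtraHost("nocolon"))     // error: bad format for add-host

	// ValidateLabel only demands at least one '='.
	fmt.Println(opts.ValidateLabel("env=prod")) // env=prod <nil>
	fmt.Println(opts.ValidateLabel("env"))      // error: bad attribute format
}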
-func (o *UlimitOpt) String() string { - var out []string - for _, v := range *o.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to Ulimits. -func (o *UlimitOpt) GetList() []*ulimit.Ulimit { - var ulimits []*ulimit.Ulimit - for _, v := range *o.values { - ulimits = append(ulimits, v) - } - - return ulimits -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md deleted file mode 100644 index 7307d9694..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md +++ /dev/null @@ -1 +0,0 @@ -This code provides helper functions for dealing with archive files. diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go deleted file mode 100644 index fb3327f12..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go +++ /dev/null @@ -1,1016 +0,0 @@ -package archive - -import ( - "archive/tar" - "bufio" - "bytes" - "compress/bzip2" - "compress/gzip" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "syscall" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -type ( - // Archive is an io.ReadCloser, combining the Read and Close interfaces. - Archive io.ReadCloser - // Reader is a type of io.Reader. - Reader io.Reader - // Compression represents whether, and how, an archive is compressed. - Compression int - // TarChownOptions wraps the chown options UID and GID. - TarChownOptions struct { - UID, GID int - } - // TarOptions wraps the tar options. - TarOptions struct { - IncludeFiles []string - ExcludePatterns []string - Compression Compression - NoLchown bool - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - ChownOpts *TarChownOptions - IncludeSourceDir bool - // When unpacking, specifies whether overwriting a directory with a - // non-directory is allowed and vice versa. - NoOverwriteDirNonDir bool - // For each include when creating an archive, the included name will be - // replaced with the matching name from this map. - RebaseNames map[string]string - } - - // Archiver allows the reuse of most utility functions of this package - // with a pluggable Untar function.
Also, to facilitate the passing of - // specific id mappings for untar, an archiver can be created with maps - // which will then be passed to Untar operations - Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - } - - // breakoutError is used to differentiate errors related to breaking out. - // When testing archive breakout in the unit tests, this error is expected - // in order for the test to pass. - breakoutError error -) - -var ( - // ErrNotImplemented is the error returned when a function is not implemented. - ErrNotImplemented = errors.New("Function not implemented") - defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil} -) - -const ( - // Uncompressed represents uncompressed data. - Uncompressed Compression = iota - // Bzip2 is the bzip2 compression algorithm. - Bzip2 - // Gzip is the gzip compression algorithm. - Gzip - // Xz is the xz compression algorithm. - Xz -) - -// IsArchive checks if it is an archive by inspecting the header. -func IsArchive(header []byte) bool { - compression := DetectCompression(header) - if compression != Uncompressed { - return true - } - r := tar.NewReader(bytes.NewBuffer(header)) - _, err := r.Next() - return err == nil -} - -// DetectCompression detects the compression algorithm of the source. -func DetectCompression(source []byte) Compression { - for compression, m := range map[Compression][]byte{ - Bzip2: {0x42, 0x5A, 0x68}, - Gzip: {0x1F, 0x8B, 0x08}, - Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, - } { - if len(source) < len(m) { - logrus.Debugf("Len too short") - continue - } - if bytes.Compare(m, source[:len(m)]) == 0 { - return compression - } - } - return Uncompressed -} - -func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) { - args := []string{"xz", "-d", "-c", "-q"} - - return cmdStream(exec.Command(args[0], args[1:]...), archive) -} - -// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive. -func DecompressStream(archive io.Reader) (io.ReadCloser, error) { - p := pools.BufioReader32KPool - buf := p.Get(archive) - bs, err := buf.Peek(10) - if err != nil { - return nil, err - } - - compression := DetectCompression(bs) - switch compression { - case Uncompressed: - readBufWrapper := p.NewReadCloserWrapper(buf, buf) - return readBufWrapper, nil - case Gzip: - gzReader, err := gzip.NewReader(buf) - if err != nil { - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) - return readBufWrapper, nil - case Bzip2: - bz2Reader := bzip2.NewReader(buf) - readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) - return readBufWrapper, nil - case Xz: - xzReader, chdone, err := xzDecompress(buf) - if err != nil { - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) - return ioutils.NewReadCloserWrapper(readBufWrapper, func() error { - <-chdone - return readBufWrapper.Close() - }), nil - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// CompressStream compresses the dest with the specified compression algorithm.
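DetectCompression works purely on magic bytes (bzip2: 0x42 0x5A 0x68, gzip: 0x1F 0x8B 0x08, xz: its 6-byte signature), so a hedged sniffing sketch needs only a header slice; the vendored import path is assumed to resolve:

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func main() {
	// The first bytes of a gzip stream; anything unrecognized (or shorter
	// than the magic being compared) falls through to Uncompressed.
	header := []byte{0x1F, 0x8B, 0x08, 0x00}
	c := archive.DetectCompression(header)
	fmt.Println(c == archive.Gzip, c.Extension()) // true tar.gz
}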
-func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) { - p := pools.BufioWriter32KPool - buf := p.Get(dest) - switch compression { - case Uncompressed: - writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) - return writeBufWrapper, nil - case Gzip: - gzWriter := gzip.NewWriter(dest) - writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) - return writeBufWrapper, nil - case Bzip2, Xz: - // archive/bzip2 does not support writing, and there is no xz support at all - // However, this is not a problem as docker only currently generates gzipped tars - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// Extension returns the extension of a file that uses the specified compression algorithm. -func (compression *Compression) Extension() string { - switch *compression { - case Uncompressed: - return "tar" - case Bzip2: - return "tar.bz2" - case Gzip: - return "tar.gz" - case Xz: - return "tar.xz" - } - return "" -} - -type tarAppender struct { - TarWriter *tar.Writer - Buffer *bufio.Writer - - // for hardlink mapping - SeenFiles map[uint64]string - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap -} - -// canonicalTarName provides a platform-independent and consistent posix-style - // path for files and directories to be archived regardless of the platform. -func canonicalTarName(name string, isDir bool) (string, error) { - name, err := CanonicalTarNameForPath(name) - if err != nil { - return "", err - } - - // suffix with '/' for directories - if isDir && !strings.HasSuffix(name, "/") { - name += "/" - } - return name, nil -} - -func (ta *tarAppender) addTarFile(path, name string) error { - fi, err := os.Lstat(path) - if err != nil { - return err - } - - link := "" - if fi.Mode()&os.ModeSymlink != 0 { - if link, err = os.Readlink(path); err != nil { - return err - } - } - - hdr, err := tar.FileInfoHeader(fi, link) - if err != nil { - return err - } - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - - name, err = canonicalTarName(name, fi.IsDir()) - if err != nil { - return fmt.Errorf("tar: cannot canonicalize path: %v", err) - } - hdr.Name = name - - inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) - if err != nil { - return err - } - - // if it's not a directory and has more than 1 link, - // it's hardlinked, so set the type flag accordingly - if !fi.IsDir() && hasHardlinks(fi) { - // a link should have a name that it links to, - // and that linked name should be first in the tar archive - if oldpath, ok := ta.SeenFiles[inode]; ok { - hdr.Typeflag = tar.TypeLink - hdr.Linkname = oldpath - hdr.Size = 0 // This must be here for the writer math to add up!
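// (A tar.TypeLink entry carries no file payload; leaving a non-zero Size here
// would desynchronize the tar writer's byte accounting.)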
- } else { - ta.SeenFiles[inode] = name - } - } - - capability, _ := system.Lgetxattr(path, "security.capability") - if capability != nil { - hdr.Xattrs = make(map[string]string) - hdr.Xattrs["security.capability"] = string(capability) - } - - // handle re-mapping container ID mappings back to host ID mappings before - // writing tar headers/files - if ta.UIDMaps != nil || ta.GIDMaps != nil { - uid, gid, err := getFileUIDGID(fi.Sys()) - if err != nil { - return err - } - xUID, err := idtools.ToContainer(uid, ta.UIDMaps) - if err != nil { - return err - } - xGID, err := idtools.ToContainer(gid, ta.GIDMaps) - if err != nil { - return err - } - hdr.Uid = xUID - hdr.Gid = xGID - } - - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - - if hdr.Typeflag == tar.TypeReg { - file, err := os.Open(path) - if err != nil { - return err - } - - ta.Buffer.Reset(ta.TarWriter) - defer ta.Buffer.Reset(nil) - _, err = io.Copy(ta.Buffer, file) - file.Close() - if err != nil { - return err - } - err = ta.Buffer.Flush() - if err != nil { - return err - } - } - - return nil -} - -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error { - // hdr.Mode is in linux format, which we can use for syscalls, - // but for os.Foo() calls we need the mode converted to os.FileMode, - // so use hdrInfo.Mode() (they differ for e.g. setuid bits) - hdrInfo := hdr.FileInfo() - - switch hdr.Typeflag { - case tar.TypeDir: - // Create directory unless it exists as a directory already. - // In that case we just want to merge the two - if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { - if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { - return err - } - } - - case tar.TypeReg, tar.TypeRegA: - // Source is regular file - file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) - if err != nil { - return err - } - if _, err := io.Copy(file, reader); err != nil { - file.Close() - return err - } - file.Close() - - case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: - // Handle this in an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - return err - } - - case tar.TypeLink: - targetPath := filepath.Join(extractDir, hdr.Linkname) - // check for hardlink breakout - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) - } - if err := os.Link(targetPath, path); err != nil { - return err - } - - case tar.TypeSymlink: - // path -> hdr.Linkname = targetPath - // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file - targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) - - // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because - // that symlink would first have to be created, which would be caught earlier, at this very check: - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) - } - if err := os.Symlink(hdr.Linkname, path); err != nil { - return err - } - - case tar.TypeXGlobalHeader: - logrus.Debugf("PAX Global Extended Headers found and ignored") - return nil - - default: - return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) - } - - // Lchown is not supported on Windows.
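// Lchown changes ownership of the link itself rather than its target, so
// symlinks receive the archived UID/GID without the target being touched.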
- if Lchown && runtime.GOOS != "windows" { - if chownOpts == nil { - chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid} - } - if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { - return err - } - } - - for key, value := range hdr.Xattrs { - if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { - return err - } - } - - // There is no LChmod, so ignore mode for symlink. Also, this - // must happen after chown, as that can modify the file mode - if err := handleLChmod(hdr, path, hdrInfo); err != nil { - return err - } - - // system.Chtimes doesn't support a NOFOLLOW flag atm - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return err - } - } else { - ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} - if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { - return err - } - } - return nil -} - -// Tar creates an archive from the directory at `path`, and returns it as a -// stream of bytes. -func Tar(path string, compression Compression) (io.ReadCloser, error) { - return TarWithOptions(path, &TarOptions{Compression: compression}) -} - -// TarWithOptions creates an archive from the directory at `path`, only including files whose relative -// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. -func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - - // Fix the source path to work with long path names. This is a no-op - // on platforms other than Windows. - srcPath = fixVolumePathPrefix(srcPath) - - patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) - - if err != nil { - return nil, err - } - - pipeReader, pipeWriter := io.Pipe() - - compressWriter, err := CompressStream(pipeWriter, options.Compression) - if err != nil { - return nil, err - } - - go func() { - ta := &tarAppender{ - TarWriter: tar.NewWriter(compressWriter), - Buffer: pools.BufioWriter32KPool.Get(nil), - SeenFiles: make(map[uint64]string), - UIDMaps: options.UIDMaps, - GIDMaps: options.GIDMaps, - } - - defer func() { - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - logrus.Debugf("Can't close tar writer: %s", err) - } - if err := compressWriter.Close(); err != nil { - logrus.Debugf("Can't close compress writer: %s", err) - } - if err := pipeWriter.Close(); err != nil { - logrus.Debugf("Can't close pipe writer: %s", err) - } - }() - - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - - stat, err := os.Lstat(srcPath) - if err != nil { - return - } - - if !stat.IsDir() { - // We can't later join a non-dir with any includes because the - // 'walk' will error if "file/." is stat-ed and "file" is not a - // directory. So, we must split the source path and use the - // basename as the include. 
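-			// (illustrative example, not in the original comment: tarring
-			// srcPath "/etc/hosts" proceeds as if srcPath were "/etc" with
-			// IncludeFiles set to []string{"hosts"})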
- if len(options.IncludeFiles) > 0 { - logrus.Warn("Tar: Can't archive a file with includes") - } - - dir, base := SplitPathDirEntry(srcPath) - srcPath = dir - options.IncludeFiles = []string{base} - } - - if len(options.IncludeFiles) == 0 { - options.IncludeFiles = []string{"."} - } - - seen := make(map[string]bool) - - for _, include := range options.IncludeFiles { - rebaseName := options.RebaseNames[include] - - walkRoot := getWalkRoot(srcPath, include) - filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { - if err != nil { - logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err) - return nil - } - - relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { - // Error getting relative path OR we are looking - // at the source directory path. Skip in both situations. - return nil - } - - if options.IncludeSourceDir && include == "." && relFilePath != "." { - relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) - } - - skip := false - - // If "include" is an exact match for the current file - // then even if there's an "excludePatterns" pattern that - // matches it, don't skip it. IOW, assume an explicit 'include' - // is asking for that file no matter what - which is true - // for some files, like .dockerignore and Dockerfile (sometimes) - if include != relFilePath { - skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) - if err != nil { - logrus.Debugf("Error matching %s: %v", relFilePath, err) - return err - } - } - - if skip { - if !exceptions && f.IsDir() { - return filepath.SkipDir - } - return nil - } - - if seen[relFilePath] { - return nil - } - seen[relFilePath] = true - - // Rename the base resource. - if rebaseName != "" { - var replacement string - if rebaseName != string(filepath.Separator) { - // Special case the root directory to replace with an - // empty string instead so that we don't end up with - // double slashes in the paths. - replacement = rebaseName - } - - relFilePath = strings.Replace(relFilePath, include, replacement, 1) - } - - if err := ta.addTarFile(filePath, relFilePath); err != nil { - logrus.Debugf("Can't add file %s to tar: %s", filePath, err) - } - return nil - }) - } - }() - - return pipeReader, nil -} - -// Unpack unpacks the decompressedArchive to dest with options. -func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { - tr := tar.NewReader(decompressedArchive) - trBuf := pools.BufioReader32KPool.Get(nil) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return err - } - - // Iterate through the files in the archive. -loop: - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return err - } - - // Normalize name, for safety and for a simple is-root check - // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: - // This keeps "..\" as-is, but normalizes "\..\" to "\". - hdr.Name = filepath.Clean(hdr.Name) - - for _, exclude := range options.ExcludePatterns { - if strings.HasPrefix(hdr.Name, exclude) { - continue loop - } - } - - // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in - // the filepath format for the OS on which the daemon is running. Hence - // the check for a slash-suffix MUST be done in an OS-agnostic way. 
-		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
-			// Not the root directory, ensure that the parent directory exists
-			parent := filepath.Dir(hdr.Name)
-			parentPath := filepath.Join(dest, parent)
-			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
-				err = system.MkdirAll(parentPath, 0777)
-				if err != nil {
-					return err
-				}
-			}
-		}
-
-		path := filepath.Join(dest, hdr.Name)
-		rel, err := filepath.Rel(dest, path)
-		if err != nil {
-			return err
-		}
-		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
-			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
-		}
-
-		// If the path exists, we almost always just want to remove and replace it.
-		// The only exception is when it is a directory *and* the file from
-		// the layer is also a directory. Then we want to merge them (i.e.
-		// just apply the metadata from the layer).
-		if fi, err := os.Lstat(path); err == nil {
-			if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
-				// If NoOverwriteDirNonDir is true then we cannot replace
-				// an existing directory with a non-directory from the archive.
-				return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
-			}
-
-			if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
-				// If NoOverwriteDirNonDir is true then we cannot replace
-				// an existing non-directory with a directory from the archive.
-				return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
-			}
-
-			if fi.IsDir() && hdr.Name == "." {
-				continue
-			}
-
-			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
-				if err := os.RemoveAll(path); err != nil {
-					return err
-				}
-			}
-		}
-		trBuf.Reset(tr)
-
-		// if the options contain UID & GID maps, convert header uid/gid
-		// entries using the maps such that lchown sets the proper mapped
-		// uid/gid after writing the file. We only perform this mapping if
-		// the file isn't already owned by the remapped root UID or GID, as
-		// that specific uid/gid has no mapping from container -> host, and
-		// those files already have the proper ownership for inside the
-		// container.
-		if hdr.Uid != remappedRootUID {
-			xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps)
-			if err != nil {
-				return err
-			}
-			hdr.Uid = xUID
-		}
-		if hdr.Gid != remappedRootGID {
-			xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps)
-			if err != nil {
-				return err
-			}
-			hdr.Gid = xGID
-		}
-
-		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
-			return err
-		}
-
-		// Directory mtimes must be handled at the end to keep later
-		// file creation inside them from modifying the directory mtime
-		if hdr.Typeflag == tar.TypeDir {
-			dirs = append(dirs, hdr)
-		}
-	}
-
-	for _, hdr := range dirs {
-		path := filepath.Join(dest, hdr.Name)
-
-		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
-// and unpacks it into the directory at `dest`.
-// The archive may be compressed with one of the following algorithms:
-// identity (uncompressed), gzip, bzip2, xz.
-// FIXME: specify behavior when target path exists vs. doesn't exist.
-func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
-	return untarHandler(tarArchive, dest, options, true)
-}
-
-// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
-// and unpacks it into the directory at `dest`.
-// The archive must be an uncompressed stream.
-func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
-	return untarHandler(tarArchive, dest, options, false)
-}
-
-// Handler for teasing out the automatic decompression
-func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
-	if tarArchive == nil {
-		return fmt.Errorf("Empty archive")
-	}
-	dest = filepath.Clean(dest)
-	if options == nil {
-		options = &TarOptions{}
-	}
-	if options.ExcludePatterns == nil {
-		options.ExcludePatterns = []string{}
-	}
-
-	r := tarArchive
-	if decompress {
-		decompressedArchive, err := DecompressStream(tarArchive)
-		if err != nil {
-			return err
-		}
-		defer decompressedArchive.Close()
-		r = decompressedArchive
-	}
-
-	return Unpack(r, dest, options)
-}
-
-// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
-// If either Tar or Untar fails, TarUntar aborts and returns the error.
-func (archiver *Archiver) TarUntar(src, dst string) error {
-	logrus.Debugf("TarUntar(%s, %s)", src, dst)
-	archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
-	if err != nil {
-		return err
-	}
-	defer archive.Close()
-
-	var options *TarOptions
-	if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
-		options = &TarOptions{
-			UIDMaps: archiver.UIDMaps,
-			GIDMaps: archiver.GIDMaps,
-		}
-	}
-	return archiver.Untar(archive, dst, options)
-}
-
-// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
-// If either Tar or Untar fails, TarUntar aborts and returns the error.
-func TarUntar(src, dst string) error {
-	return defaultArchiver.TarUntar(src, dst)
-}
-
-// UntarPath untars a file from a path to a destination; src is the source tar file path.
-func (archiver *Archiver) UntarPath(src, dst string) error {
-	archive, err := os.Open(src)
-	if err != nil {
-		return err
-	}
-	defer archive.Close()
-	var options *TarOptions
-	if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
-		options = &TarOptions{
-			UIDMaps: archiver.UIDMaps,
-			GIDMaps: archiver.GIDMaps,
-		}
-	}
-	if err := archiver.Untar(archive, dst, options); err != nil {
-		return err
-	}
-	return nil
-}
-
-// UntarPath is a convenience function which looks for an archive
-// at filesystem path `src`, and unpacks it at `dst`.
-func UntarPath(src, dst string) error {
-	return defaultArchiver.UntarPath(src, dst)
-}
-
-// CopyWithTar creates a tar archive of filesystem path `src`, and
-// unpacks it at filesystem path `dst`.
-// The archive is streamed directly with fixed buffering and no
-// intermediary disk IO.
-func (archiver *Archiver) CopyWithTar(src, dst string) error {
-	srcSt, err := os.Stat(src)
-	if err != nil {
-		return err
-	}
-	if !srcSt.IsDir() {
-		return archiver.CopyFileWithTar(src, dst)
-	}
-	// Create dst, copy src's content into it
-	logrus.Debugf("Creating dest directory: %s", dst)
-	if err := system.MkdirAll(dst, 0755); err != nil {
-		return err
-	}
-	logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
-	return archiver.TarUntar(src, dst)
-}
-
-// CopyWithTar creates a tar archive of filesystem path `src`, and
-// unpacks it at filesystem path `dst`.
-// The archive is streamed directly with fixed buffering and no
-// intermediary disk IO.
-func CopyWithTar(src, dst string) error {
-	return defaultArchiver.CopyWithTar(src, dst)
-}
-
-// CopyFileWithTar emulates the behavior of the 'cp' command-line
-// for a single file. It copies a regular file from path `src` to
-// path `dst`, and preserves all its metadata.
-func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
-	logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
-	srcSt, err := os.Stat(src)
-	if err != nil {
-		return err
-	}
-
-	if srcSt.IsDir() {
-		return fmt.Errorf("Can't copy a directory")
-	}
-
-	// Clean up the trailing slash. This must be done in an
-	// operating-system-specific manner.
-	if dst[len(dst)-1] == os.PathSeparator {
-		dst = filepath.Join(dst, filepath.Base(src))
-	}
-	// Create the holding directory if necessary
-	if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil {
-		return err
-	}
-
-	r, w := io.Pipe()
-	errC := promise.Go(func() error {
-		defer w.Close()
-
-		srcF, err := os.Open(src)
-		if err != nil {
-			return err
-		}
-		defer srcF.Close()
-
-		hdr, err := tar.FileInfoHeader(srcSt, "")
-		if err != nil {
-			return err
-		}
-		hdr.Name = filepath.Base(dst)
-		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
-
-		remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
-		if err != nil {
-			return err
-		}
-
-		// only perform mapping if the file being copied isn't already owned by the
-		// uid or gid of the remapped root in the container
-		if remappedRootUID != hdr.Uid {
-			xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps)
-			if err != nil {
-				return err
-			}
-			hdr.Uid = xUID
-		}
-		if remappedRootGID != hdr.Gid {
-			xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps)
-			if err != nil {
-				return err
-			}
-			hdr.Gid = xGID
-		}
-
-		tw := tar.NewWriter(w)
-		defer tw.Close()
-		if err := tw.WriteHeader(hdr); err != nil {
-			return err
-		}
-		if _, err := io.Copy(tw, srcF); err != nil {
-			return err
-		}
-		return nil
-	})
-	defer func() {
-		if er := <-errC; er != nil {
-			err = er
-		}
-	}()
-
-	err = archiver.Untar(r, filepath.Dir(dst), nil)
-	if err != nil {
-		r.CloseWithError(err)
-	}
-	return err
-}
-
-// CopyFileWithTar emulates the behavior of the 'cp' command-line
-// for a single file. It copies a regular file from path `src` to
-// path `dst`, and preserves all its metadata.
-//
-// Destination handling is done in an operating-system-specific manner
-// depending on where the daemon is running. If `dst` ends with a trailing
-// slash the final destination path will be `dst/base(src)` (Linux) or
-// `dst\base(src)` (Windows).
-func CopyFileWithTar(src, dst string) (err error) {
-	return defaultArchiver.CopyFileWithTar(src, dst)
-}
-
-// cmdStream executes a command, and returns its stdout as a stream.
-// If the command fails to run or doesn't complete successfully, an error
-// will be returned, including anything written on stderr.
-func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) {
-	chdone := make(chan struct{})
-	cmd.Stdin = input
-	pipeR, pipeW := io.Pipe()
-	cmd.Stdout = pipeW
-	var errBuf bytes.Buffer
-	cmd.Stderr = &errBuf
-
-	// Run the command and return the pipe
-	if err := cmd.Start(); err != nil {
-		return nil, nil, err
-	}
-
-	// Copy stdout to the returned pipe
-	go func() {
-		if err := cmd.Wait(); err != nil {
-			pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
-		} else {
-			pipeW.Close()
-		}
-		close(chdone)
-	}()
-
-	return pipeR, chdone, nil
-}
-
-// NewTempArchive reads the content of src into a temporary file, and returns the contents
-// of that file as an archive. The archive can only be read once - as soon as reading completes,
-// the file will be deleted.
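-// A hypothetical call site (illustrative sketch only; dst is assumed to be
-// an io.Writer supplied by the caller):
-//
-//	tmp, err := NewTempArchive(src, "")	// "" selects the default temp dir
-//	if err != nil {
-//		return err
-//	}
-//	_, err = io.Copy(dst, tmp)	// the temp file is removed once fully read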
-func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
-	f, err := ioutil.TempFile(dir, "")
-	if err != nil {
-		return nil, err
-	}
-	if _, err := io.Copy(f, src); err != nil {
-		return nil, err
-	}
-	if _, err := f.Seek(0, 0); err != nil {
-		return nil, err
-	}
-	st, err := f.Stat()
-	if err != nil {
-		return nil, err
-	}
-	size := st.Size()
-	return &TempArchive{File: f, Size: size}, nil
-}
-
-// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
-// the file will be deleted.
-type TempArchive struct {
-	*os.File
-	Size   int64 // Pre-computed from Stat().Size() as a convenience
-	read   int64
-	closed bool
-}
-
-// Close closes the underlying file if it's still open, or does a no-op
-// to allow callers to try to close the TempArchive multiple times safely.
-func (archive *TempArchive) Close() error {
-	if archive.closed {
-		return nil
-	}
-
-	archive.closed = true
-
-	return archive.File.Close()
-}
-
-func (archive *TempArchive) Read(data []byte) (int, error) {
-	n, err := archive.File.Read(data)
-	archive.read += int64(n)
-	if err != nil || archive.read == archive.Size {
-		archive.Close()
-		os.Remove(archive.File.Name())
-	}
-	return n, err
-}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go
deleted file mode 100644
index 07693e37c..000000000
--- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// +build !windows
-
-package archive
-
-import (
-	"archive/tar"
-	"errors"
-	"os"
-	"path/filepath"
-	"syscall"
-
-	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
-)
-
-// fixVolumePathPrefix does platform specific processing to ensure that if
-// the path being passed in is not in a volume path format, convert it to one.
-func fixVolumePathPrefix(srcPath string) string {
-	return srcPath
-}
-
-// getWalkRoot calculates the root path when performing a TarWithOptions.
-// We use a separate function as this is platform specific. On Linux, we
-// can't use filepath.Join(srcPath, include) because this will clean away
-// a trailing "." or "/" which may be important.
-func getWalkRoot(srcPath string, include string) string {
-	return srcPath + string(filepath.Separator) + include
-}
-
-// CanonicalTarNameForPath returns the platform-specific filepath converted
-// to a canonical posix-style path for tar archival. p is a relative path.
-func CanonicalTarNameForPath(p string) (string, error) {
-	return p, nil // already unix-style
-}
-
-// chmodTarEntry is used to adjust the file permissions used in tar header based
-// on the platform the archival is done.
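-// On Unix this is a no-op; the Windows variant of this file masks the
-// permissions to 0755 and adds the execute bit instead.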
-
-func chmodTarEntry(perm os.FileMode) os.FileMode {
-	return perm // no-op for unix, as the Go APIs already provide perm bits correctly
-}
-
-func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
-	s, ok := stat.(*syscall.Stat_t)
-
-	if !ok {
-		err = errors.New("cannot convert stat value to syscall.Stat_t")
-		return
-	}
-
-	inode = uint64(s.Ino)
-
-	// Currently go does not fill in the major/minor numbers
-	if s.Mode&syscall.S_IFBLK != 0 ||
-		s.Mode&syscall.S_IFCHR != 0 {
-		hdr.Devmajor = int64(major(uint64(s.Rdev)))
-		hdr.Devminor = int64(minor(uint64(s.Rdev)))
-	}
-
-	return
-}
-
-func getFileUIDGID(stat interface{}) (int, int, error) {
-	s, ok := stat.(*syscall.Stat_t)
-
-	if !ok {
-		return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t")
-	}
-	return int(s.Uid), int(s.Gid), nil
-}
-
-func major(device uint64) uint64 {
-	return (device >> 8) & 0xfff
-}
-
-func minor(device uint64) uint64 {
-	return (device & 0xff) | ((device >> 12) & 0xfff00)
-}
-
-// handleTarTypeBlockCharFifo is an OS-specific helper function used by
-// createTarFile to handle the following types of header: Block; Char; Fifo
-func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
-	mode := uint32(hdr.Mode & 07777)
-	switch hdr.Typeflag {
-	case tar.TypeBlock:
-		mode |= syscall.S_IFBLK
-	case tar.TypeChar:
-		mode |= syscall.S_IFCHR
-	case tar.TypeFifo:
-		mode |= syscall.S_IFIFO
-	}
-
-	if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
-		return err
-	}
-	return nil
-}
-
-func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
-	if hdr.Typeflag == tar.TypeLink {
-		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
-			if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
-				return err
-			}
-		}
-	} else if hdr.Typeflag != tar.TypeSymlink {
-		if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
-			return err
-		}
-	}
-	return nil
-}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go
deleted file mode 100644
index fbabc03a4..000000000
--- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// +build windows
-
-package archive
-
-import (
-	"archive/tar"
-	"fmt"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath"
-)
-
-// fixVolumePathPrefix does platform specific processing to ensure that if
-// the path being passed in is not in a volume path format, convert it to one.
-func fixVolumePathPrefix(srcPath string) string {
-	return longpath.AddPrefix(srcPath)
-}
-
-// getWalkRoot calculates the root path when performing a TarWithOptions.
-// We use a separate function as this is platform specific.
-func getWalkRoot(srcPath string, include string) string {
-	return filepath.Join(srcPath, include)
-}
-
-// CanonicalTarNameForPath returns the platform-specific filepath converted
-// to a canonical posix-style path for tar archival. p is a relative path.
-func CanonicalTarNameForPath(p string) (string, error) {
-	// windows: convert windows style relative path with backslashes
-	// into forward slashes.
Since windows does not allow '/' or '\'
-	// in file names, it is mostly safe to replace; however, we must
-	// check just in case.
-	if strings.Contains(p, "/") {
-		return "", fmt.Errorf("Windows path contains forward slash: %s", p)
-	}
-	return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
-
-}
-
-// chmodTarEntry is used to adjust the file permissions used in tar header based
-// on the platform the archival is done.
-func chmodTarEntry(perm os.FileMode) os.FileMode {
-	perm &= 0755
-	// Add the x bit: make everything +x from windows
-	perm |= 0111
-
-	return perm
-}
-
-func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
-	// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
-	return
-}
-
-// handleTarTypeBlockCharFifo is an OS-specific helper function used by
-// createTarFile to handle the following types of header: Block; Char; Fifo
-func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
-	return nil
-}
-
-func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
-	return nil
-}
-
-func getFileUIDGID(stat interface{}) (int, int, error) {
-	// no notion of file ownership mapping yet on Windows
-	return 0, 0, nil
-}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go
deleted file mode 100644
index 12ec40163..000000000
--- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go
+++ /dev/null
@@ -1,413 +0,0 @@
-package archive
-
-import (
-	"archive/tar"
-	"bytes"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"sort"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
-	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools"
-	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
-	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
-)
-
-// ChangeType represents the change type.
-type ChangeType int
-
-const (
-	// ChangeModify represents the modify operation.
-	ChangeModify = iota
-	// ChangeAdd represents the add operation.
-	ChangeAdd
-	// ChangeDelete represents the delete operation.
-	ChangeDelete
-)
-
-// Change represents a change; it wraps the change type and path.
-// It describes changes of the files in the path with respect to the
-// parent layers. The change could be modify, add, or delete.
-// This is used for layer diff.
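-// For example (illustrative), Change{Path: "/foo/bar", Kind: ChangeAdd}
-// prints as "A /foo/bar" via the String method below.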
-type Change struct {
-	Path string
-	Kind ChangeType
-}
-
-func (change *Change) String() string {
-	var kind string
-	switch change.Kind {
-	case ChangeModify:
-		kind = "C"
-	case ChangeAdd:
-		kind = "A"
-	case ChangeDelete:
-		kind = "D"
-	}
-	return fmt.Sprintf("%s %s", kind, change.Path)
-}
-
-// for sort.Sort
-type changesByPath []Change
-
-func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
-func (c changesByPath) Len() int           { return len(c) }
-func (c changesByPath) Swap(i, j int)      { c[j], c[i] = c[i], c[j] }
-
-// Gnu tar and the go tar writer don't have sub-second mtime
-// precision, which is problematic when we apply changes via tar
-// files. We handle this by comparing for exact times, *or* for the
-// same second count where either a or b has exactly 0 nanoseconds.
-func sameFsTime(a, b time.Time) bool {
-	return a == b ||
-		(a.Unix() == b.Unix() &&
-			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
-}
-
-func sameFsTimeSpec(a, b syscall.Timespec) bool {
-	return a.Sec == b.Sec &&
-		(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
-}
-
-// Changes walks the path rw and determines changes for the files in the path,
-// with respect to the parent layers
-func Changes(layers []string, rw string) ([]Change, error) {
-	var (
-		changes     []Change
-		changedDirs = make(map[string]struct{})
-	)
-
-	err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-
-		// Rebase path
-		path, err = filepath.Rel(rw, path)
-		if err != nil {
-			return err
-		}
-
-		// As this runs on the daemon side, file paths are OS specific.
-		path = filepath.Join(string(os.PathSeparator), path)
-
-		// Skip root
-		if path == string(os.PathSeparator) {
-			return nil
-		}
-
-		// Skip AUFS metadata
-		if matched, err := filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path); err != nil || matched {
-			return err
-		}
-
-		change := Change{
-			Path: path,
-		}
-
-		// Find out what kind of modification happened
-		file := filepath.Base(path)
-		// If there is a whiteout, then the file was removed
-		if strings.HasPrefix(file, WhiteoutPrefix) {
-			originalFile := file[len(WhiteoutPrefix):]
-			change.Path = filepath.Join(filepath.Dir(path), originalFile)
-			change.Kind = ChangeDelete
-		} else {
-			// Otherwise, the file was added
-			change.Kind = ChangeAdd
-
-			// ...Unless it already existed in a top layer, in which case, it's a modification
-			for _, layer := range layers {
-				stat, err := os.Stat(filepath.Join(layer, path))
-				if err != nil && !os.IsNotExist(err) {
-					return err
-				}
-				if err == nil {
-					// The file existed in the top layer, so that's a modification
-
-					// However, if it's a directory, maybe it wasn't actually modified.
-					// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
-					if stat.IsDir() && f.IsDir() {
-						if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
-							// Both directories are the same, don't record the change
-							return nil
-						}
-					}
-					change.Kind = ChangeModify
-					break
-				}
-			}
-		}
-
-		// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
-		// This block is here to ensure the change is recorded even if the
-		// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
-		// Check https://github.com/docker/docker/pull/13590 for details.
-		if f.IsDir() {
-			changedDirs[path] = struct{}{}
-		}
-		if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
-			parent := filepath.Dir(path)
-			if _, ok := changedDirs[parent]; !ok && parent != "/" {
-				changes = append(changes, Change{Path: parent, Kind: ChangeModify})
-				changedDirs[parent] = struct{}{}
-			}
-		}
-
-		// Record change
-		changes = append(changes, change)
-		return nil
-	})
-	if err != nil && !os.IsNotExist(err) {
-		return nil, err
-	}
-	return changes, nil
-}
-
-// FileInfo describes the information of a file.
-type FileInfo struct {
-	parent     *FileInfo
-	name       string
-	stat       *system.StatT
-	children   map[string]*FileInfo
-	capability []byte
-	added      bool
-}
-
-// LookUp looks up the file information of a file.
-func (info *FileInfo) LookUp(path string) *FileInfo {
-	// As this runs on the daemon side, file paths are OS specific.
-	parent := info
-	if path == string(os.PathSeparator) {
-		return info
-	}
-
-	pathElements := strings.Split(path, string(os.PathSeparator))
-	for _, elem := range pathElements {
-		if elem != "" {
-			child := parent.children[elem]
-			if child == nil {
-				return nil
-			}
-			parent = child
-		}
-	}
-	return parent
-}
-
-func (info *FileInfo) path() string {
-	if info.parent == nil {
-		// As this runs on the daemon side, file paths are OS specific.
-		return string(os.PathSeparator)
-	}
-	return filepath.Join(info.parent.path(), info.name)
-}
-
-func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
-
-	sizeAtEntry := len(*changes)
-
-	if oldInfo == nil {
-		// add
-		change := Change{
-			Path: info.path(),
-			Kind: ChangeAdd,
-		}
-		*changes = append(*changes, change)
-		info.added = true
-	}
-
-	// We make a copy so we can modify it to detect additions;
-	// also, we only recurse on the old dir if the new info is a directory,
-	// otherwise any previous delete/change is considered recursive.
-	oldChildren := make(map[string]*FileInfo)
-	if oldInfo != nil && info.isDir() {
-		for k, v := range oldInfo.children {
-			oldChildren[k] = v
-		}
-	}
-
-	for name, newChild := range info.children {
-		oldChild, _ := oldChildren[name]
-		if oldChild != nil {
-			// change?
-			oldStat := oldChild.stat
-			newStat := newChild.stat
-			// Note: We can't compare inode or ctime or blocksize here, because these change
-			// when copying a file into a container. However, that is not generally a problem
-			// because any content change will change mtime, and any status change should
-			// be visible when actually comparing the stat fields. The only time this
-			// breaks down is if some code intentionally hides a change by setting
-			// back mtime
-			if statDifferent(oldStat, newStat) ||
-				bytes.Compare(oldChild.capability, newChild.capability) != 0 {
-				change := Change{
-					Path: newChild.path(),
-					Kind: ChangeModify,
-				}
-				*changes = append(*changes, change)
-				newChild.added = true
-			}
-
-			// Remove from copy so we can detect deletions
-			delete(oldChildren, name)
-		}
-
-		newChild.addChanges(oldChild, changes)
-	}
-	for _, oldChild := range oldChildren {
-		// delete
-		change := Change{
-			Path: oldChild.path(),
-			Kind: ChangeDelete,
-		}
-		*changes = append(*changes, change)
-	}
-
-	// If there were changes inside this directory, we need to add it, even if the directory
-	// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
-	// As this runs on the daemon side, file paths are OS specific.
-	if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
-		change := Change{
-			Path: info.path(),
-			Kind: ChangeModify,
-		}
-		// Let's insert the directory entry before the recently added entries located inside this dir
-		*changes = append(*changes, change) // just to resize the slice, will be overwritten
-		copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
-		(*changes)[sizeAtEntry] = change
-	}
-
-}
-
-// Changes adds changes to file information.
-func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
-	var changes []Change
-
-	info.addChanges(oldInfo, &changes)
-
-	return changes
-}
-
-func newRootFileInfo() *FileInfo {
-	// As this runs on the daemon side, file paths are OS specific.
-	root := &FileInfo{
-		name:     string(os.PathSeparator),
-		children: make(map[string]*FileInfo),
-	}
-	return root
-}
-
-// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
-// If oldDir is "", then all files in newDir will be Add-Changes.
-func ChangesDirs(newDir, oldDir string) ([]Change, error) {
-	var (
-		oldRoot, newRoot *FileInfo
-	)
-	if oldDir == "" {
-		emptyDir, err := ioutil.TempDir("", "empty")
-		if err != nil {
-			return nil, err
-		}
-		defer os.Remove(emptyDir)
-		oldDir = emptyDir
-	}
-	oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
-	if err != nil {
-		return nil, err
-	}
-
-	return newRoot.Changes(oldRoot), nil
-}
-
-// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
-func ChangesSize(newDir string, changes []Change) int64 {
-	var (
-		size int64
-		sf   = make(map[uint64]struct{})
-	)
-	for _, change := range changes {
-		if change.Kind == ChangeModify || change.Kind == ChangeAdd {
-			file := filepath.Join(newDir, change.Path)
-			fileInfo, err := os.Lstat(file)
-			if err != nil {
-				logrus.Errorf("Can not stat %q: %s", file, err)
-				continue
-			}
-
-			if fileInfo != nil && !fileInfo.IsDir() {
-				if hasHardlinks(fileInfo) {
-					inode := getIno(fileInfo)
-					if _, ok := sf[inode]; !ok {
-						size += fileInfo.Size()
-						sf[inode] = struct{}{}
-					}
-				} else {
-					size += fileInfo.Size()
-				}
-			}
-		}
-	}
-	return size
-}
-
-// ExportChanges produces an Archive from the provided changes, relative to dir.
-func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) {
-	reader, writer := io.Pipe()
-	go func() {
-		ta := &tarAppender{
-			TarWriter: tar.NewWriter(writer),
-			Buffer:    pools.BufioWriter32KPool.Get(nil),
-			SeenFiles: make(map[uint64]string),
-			UIDMaps:   uidMaps,
-			GIDMaps:   gidMaps,
-		}
-		// this buffer is needed for the duration of this piped stream
-		defer pools.BufioWriter32KPool.Put(ta.Buffer)
-
-		sort.Sort(changesByPath(changes))
-
-		// In general we log errors here but ignore them because
-		// during e.g. a diff operation the container can continue
-		// mutating the filesystem and we can see transient errors
-		// from this
-		for _, change := range changes {
-			if change.Kind == ChangeDelete {
-				whiteOutDir := filepath.Dir(change.Path)
-				whiteOutBase := filepath.Base(change.Path)
-				whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
-				timestamp := time.Now()
-				hdr := &tar.Header{
-					Name:       whiteOut[1:],
-					Size:       0,
-					ModTime:    timestamp,
-					AccessTime: timestamp,
-					ChangeTime: timestamp,
-				}
-				if err := ta.TarWriter.WriteHeader(hdr); err != nil {
-					logrus.Debugf("Can't write whiteout header: %s", err)
-				}
-			} else {
-				path := filepath.Join(dir, change.Path)
-				if err := ta.addTarFile(path, change.Path[1:]); err != nil {
-					logrus.Debugf("Can't add file %s to tar: %s", path, err)
-				}
-			}
-		}
-
-		// Make sure to check the error on Close.
-		if err := ta.TarWriter.Close(); err != nil {
-			logrus.Debugf("Can't close layer: %s", err)
-		}
-		if err := writer.Close(); err != nil {
-			logrus.Debugf("failed to close Changes writer: %s", err)
-		}
-	}()
-	return reader, nil
-}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go
deleted file mode 100644
index 378cc09c8..000000000
--- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go
+++ /dev/null
@@ -1,285 +0,0 @@
-package archive
-
-import (
-	"bytes"
-	"fmt"
-	"os"
-	"path/filepath"
-	"sort"
-	"syscall"
-	"unsafe"
-
-	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
-)
-
-// walker is used to implement collectFileInfoForChanges on linux. Where this
-// method in general returns the entire contents of two directory trees, we
-// optimize some FS calls out on linux. In particular, we take advantage of the
-// fact that getdents(2) returns the inode of each file in the directory being
-// walked, which, when walking two trees in parallel to generate a list of
-// changes, can be used to prune subtrees without ever having to lstat(2) them
-// directly. Eliminating stat calls in this way can save seconds on large
-// images.
-type walker struct {
-	dir1  string
-	dir2  string
-	root1 *FileInfo
-	root2 *FileInfo
-}
-
-// collectFileInfoForChanges returns a complete representation of the trees
-// rooted at dir1 and dir2, with one important exception: any subtree or
-// leaf where the inode and device numbers are an exact match between dir1
-// and dir2 will be pruned from the results. This method is *only* to be used
-// to generate a list of changes between the two directories, as it does not
-// reflect the full contents.
-func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
-	w := &walker{
-		dir1:  dir1,
-		dir2:  dir2,
-		root1: newRootFileInfo(),
-		root2: newRootFileInfo(),
-	}
-
-	i1, err := os.Lstat(w.dir1)
-	if err != nil {
-		return nil, nil, err
-	}
-	i2, err := os.Lstat(w.dir2)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	if err := w.walk("/", i1, i2); err != nil {
-		return nil, nil, err
-	}
-
-	return w.root1, w.root2, nil
-}
-
-// Given a FileInfo, its path info, and a reference to the root of the tree
-// being constructed, register this file with the tree.
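-// Note that fi may be nil when a name exists in only one of the two trees
-// being walked, in which case there is nothing to register for that side.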
-func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { - if fi == nil { - return nil - } - parent := root.LookUp(filepath.Dir(path)) - if parent == nil { - return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path) - } - info := &FileInfo{ - name: filepath.Base(path), - children: make(map[string]*FileInfo), - parent: parent, - } - cpath := filepath.Join(dir, path) - stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) - if err != nil { - return err - } - info.stat = stat - info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access - parent.children[info.name] = info - return nil -} - -// Walk a subtree rooted at the same path in both trees being iterated. For -// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d -func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { - // Register these nodes with the return trees, unless we're still at the - // (already-created) roots: - if path != "/" { - if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { - return err - } - if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { - return err - } - } - - is1Dir := i1 != nil && i1.IsDir() - is2Dir := i2 != nil && i2.IsDir() - - sameDevice := false - if i1 != nil && i2 != nil { - si1 := i1.Sys().(*syscall.Stat_t) - si2 := i2.Sys().(*syscall.Stat_t) - if si1.Dev == si2.Dev { - sameDevice = true - } - } - - // If these files are both non-existent, or leaves (non-dirs), we are done. - if !is1Dir && !is2Dir { - return nil - } - - // Fetch the names of all the files contained in both directories being walked: - var names1, names2 []nameIno - if is1Dir { - names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access - if err != nil { - return err - } - } - if is2Dir { - names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access - if err != nil { - return err - } - } - - // We have lists of the files contained in both parallel directories, sorted - // in the same order. Walk them in parallel, generating a unique merged list - // of all items present in either or both directories. 
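-	// (a standard merge of two sorted lists; a name present in both trees is
-	// kept only when its inode differs or the devices differ, which is what
-	// prunes unchanged subtrees)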
- var names []string - ix1 := 0 - ix2 := 0 - - for { - if ix1 >= len(names1) { - break - } - if ix2 >= len(names2) { - break - } - - ni1 := names1[ix1] - ni2 := names2[ix2] - - switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { - case -1: // ni1 < ni2 -- advance ni1 - // we will not encounter ni1 in names2 - names = append(names, ni1.name) - ix1++ - case 0: // ni1 == ni2 - if ni1.ino != ni2.ino || !sameDevice { - names = append(names, ni1.name) - } - ix1++ - ix2++ - case 1: // ni1 > ni2 -- advance ni2 - // we will not encounter ni2 in names1 - names = append(names, ni2.name) - ix2++ - } - } - for ix1 < len(names1) { - names = append(names, names1[ix1].name) - ix1++ - } - for ix2 < len(names2) { - names = append(names, names2[ix2].name) - ix2++ - } - - // For each of the names present in either or both of the directories being - // iterated, stat the name under each root, and recurse the pair of them: - for _, name := range names { - fname := filepath.Join(path, name) - var cInfo1, cInfo2 os.FileInfo - if is1Dir { - cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if is2Dir { - cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if err = w.walk(fname, cInfo1, cInfo2); err != nil { - return err - } - } - return nil -} - -// {name,inode} pairs used to support the early-pruning logic of the walker type -type nameIno struct { - name string - ino uint64 -} - -type nameInoSlice []nameIno - -func (s nameInoSlice) Len() int { return len(s) } -func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } - -// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode -// numbers further up the stack when reading directory contents. Unlike -// os.Readdirnames, which returns a list of filenames, this function returns a -// list of {filename,inode} pairs. -func readdirnames(dirname string) (names []nameIno, err error) { - var ( - size = 100 - buf = make([]byte, 4096) - nbuf int - bufp int - nb int - ) - - f, err := os.Open(dirname) - if err != nil { - return nil, err - } - defer f.Close() - - names = make([]nameIno, 0, size) // Empty with room to grow. - for { - // Refill the buffer if necessary - if bufp >= nbuf { - bufp = 0 - nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux - if nbuf < 0 { - nbuf = 0 - } - if err != nil { - return nil, os.NewSyscallError("readdirent", err) - } - if nbuf <= 0 { - break // EOF - } - } - - // Drain the buffer - nb, names = parseDirent(buf[bufp:nbuf], names) - bufp += nb - } - - sl := nameInoSlice(names) - sort.Sort(sl) - return sl, nil -} - -// parseDirent is a minor modification of syscall.ParseDirent (linux version) -// which returns {name,inode} pairs instead of just names. -func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { - origlen := len(buf) - for len(buf) > 0 { - dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) - buf = buf[dirent.Reclen:] - if dirent.Ino == 0 { // File absent in directory. - continue - } - bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) - var name = string(bytes[0:clen(bytes[:])]) - if name == "." || name == ".." 
{ // Useless names - continue - } - names = append(names, nameIno{name, dirent.Ino}) - } - return origlen - len(buf), names -} - -func clen(n []byte) int { - for i := 0; i < len(n); i++ { - if n[i] == 0 { - return i - } - } - return len(n) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go deleted file mode 100644 index 35832f087..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build !linux - -package archive - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { - var ( - oldRoot, newRoot *FileInfo - err1, err2 error - errs = make(chan error, 2) - ) - go func() { - oldRoot, err1 = collectFileInfo(oldDir) - errs <- err1 - }() - go func() { - newRoot, err2 = collectFileInfo(newDir) - errs <- err2 - }() - - // block until both routines have returned - for i := 0; i < 2; i++ { - if err := <-errs; err != nil { - return nil, nil, err - } - } - - return oldRoot, newRoot, nil -} - -func collectFileInfo(sourceDir string) (*FileInfo, error) { - root := newRootFileInfo() - - err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - relPath, err := filepath.Rel(sourceDir, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. - relPath = filepath.Join(string(os.PathSeparator), relPath) - - // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. - // Temporary workaround. If the returned path starts with two backslashes, - // trim it down to a single backslash. Only relevant on Windows. 
-		if runtime.GOOS == "windows" {
-			if strings.HasPrefix(relPath, `\\`) {
-				relPath = relPath[1:]
-			}
-		}
-
-		if relPath == string(os.PathSeparator) {
-			return nil
-		}
-
-		parent := root.LookUp(filepath.Dir(relPath))
-		if parent == nil {
-			return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
-		}
-
-		info := &FileInfo{
-			name:     filepath.Base(relPath),
-			children: make(map[string]*FileInfo),
-			parent:   parent,
-		}
-
-		s, err := system.Lstat(path)
-		if err != nil {
-			return err
-		}
-		info.stat = s
-
-		info.capability, _ = system.Lgetxattr(path, "security.capability")
-
-		parent.children[info.name] = info
-
-		return nil
-	})
-	if err != nil {
-		return nil, err
-	}
-	return root, nil
-}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go
deleted file mode 100644
index 6646b4dfd..000000000
--- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// +build !windows
-
-package archive
-
-import (
-	"os"
-	"syscall"
-
-	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
-)
-
-func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
-	// Don't look at size for dirs; it's not a good measure of change
-	if oldStat.Mode() != newStat.Mode() ||
-		oldStat.UID() != newStat.UID() ||
-		oldStat.GID() != newStat.GID() ||
-		oldStat.Rdev() != newStat.Rdev() ||
-		// Don't look at size for dirs; it's not a good measure of change
-		(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
-			(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
-		return true
-	}
-	return false
-}
-
-func (info *FileInfo) isDir() bool {
-	return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
-}
-
-func getIno(fi os.FileInfo) uint64 {
-	return uint64(fi.Sys().(*syscall.Stat_t).Ino)
-}
-
-func hasHardlinks(fi os.FileInfo) bool {
-	return fi.Sys().(*syscall.Stat_t).Nlink > 1
-}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go
deleted file mode 100644
index 2d8708d0a..000000000
--- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package archive
-
-import (
-	"os"
-
-	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
-)
-
-func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
-
-	// Don't look at size for dirs; it's not a good measure of change
-	if oldStat.ModTime() != newStat.ModTime() ||
-		oldStat.Mode() != newStat.Mode() ||
-		oldStat.Size() != newStat.Size() && !oldStat.IsDir() {
-		return true
-	}
-	return false
-}
-
-func (info *FileInfo) isDir() bool {
-	return info.parent == nil || info.stat.IsDir()
-}
-
-func getIno(fi os.FileInfo) (inode uint64) {
-	return
-}
-
-func hasHardlinks(fi os.FileInfo) bool {
-	return false
-}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go
deleted file mode
100644 index 251c9bd99..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go +++ /dev/null @@ -1,407 +0,0 @@ -package archive - -import ( - "archive/tar" - "errors" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -// Errors used or returned by this file. -var ( - ErrNotDirectory = errors.New("not a directory") - ErrDirNotExists = errors.New("no such directory") - ErrCannotCopyDir = errors.New("cannot copy directory") - ErrInvalidCopySource = errors.New("invalid copy source content") -) - -// PreserveTrailingDotOrSeparator returns the given cleaned path (after -// processing using any utility functions from the path or filepath stdlib -// packages) and appends a trailing `/.` or `/` if its corresponding original -// path (from before being processed by utility functions from the path or -// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned -// path already ends in a `.` path segment, then another is not added. If the -// clean path already ends in a path separator, then another is not added. -func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { - // Ensure paths are in platform semantics - cleanedPath = normalizePath(cleanedPath) - originalPath = normalizePath(originalPath) - - if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { - if !hasTrailingPathSeparator(cleanedPath) { - // Add a separator if it doesn't already end with one (a cleaned - // path would only end in a separator if it is the root). - cleanedPath += string(filepath.Separator) - } - cleanedPath += "." - } - - if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { - cleanedPath += string(filepath.Separator) - } - - return cleanedPath -} - -// assertsDirectory returns whether the given path is -// asserted to be a directory, i.e., the path ends with -// a trailing '/' or `/.`, assuming a path separator of `/`. -func assertsDirectory(path string) bool { - return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) -} - -// hasTrailingPathSeparator returns whether the given -// path ends with the system's path separator character. -func hasTrailingPathSeparator(path string) bool { - return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) -} - -// specifiesCurrentDir returns whether the given path specifies -// a "current directory", i.e., the last path segment is `.`. -func specifiesCurrentDir(path string) bool { - return filepath.Base(path) == "." -} - -// SplitPathDirEntry splits the given path between its directory name and its -// basename by first cleaning the path but preserves a trailing "." if the -// original path specified the current directory. -func SplitPathDirEntry(path string) (dir, base string) { - cleanedPath := filepath.Clean(normalizePath(path)) - - if specifiesCurrentDir(path) { - cleanedPath += string(filepath.Separator) + "." - } - - return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) -} - -// TarResource archives the resource described by the given CopyInfo to a Tar -// archive. A non-nil error is returned if sourcePath does not exist or is -// asserted to be a directory but exists as another type of file. -// -// This function acts as a convenient wrapper around TarWithOptions, which -// requires a directory as the source path. 
TarResource accepts either a
-// directory or a file path and correctly sets the Tar options.
-func TarResource(sourceInfo CopyInfo) (content Archive, err error) {
-	return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
-}
-
-// TarResourceRebase is like TarResource but renames the first path element of
-// items in the resulting tar archive to match the given rebaseName if not "".
-func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) {
-	sourcePath = normalizePath(sourcePath)
-	if _, err = os.Lstat(sourcePath); err != nil {
-		// Catches the case where the source does not exist or is not a
-		// directory if asserted to be a directory, as this also causes an
-		// error.
-		return
-	}
-
-	// Separate the source path between its directory and
-	// the entry in that directory which we are archiving.
-	sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
-
-	filter := []string{sourceBase}
-
-	logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
-
-	return TarWithOptions(sourceDir, &TarOptions{
-		Compression:      Uncompressed,
-		IncludeFiles:     filter,
-		IncludeSourceDir: true,
-		RebaseNames: map[string]string{
-			sourceBase: rebaseName,
-		},
-	})
-}
-
-// CopyInfo holds basic info about the source
-// or destination path of a copy operation.
-type CopyInfo struct {
-	Path       string
-	Exists     bool
-	IsDir      bool
-	RebaseName string
-}
-
-// CopyInfoSourcePath stats the given path to create a CopyInfo
-// struct representing that resource for the source of an archive copy
-// operation. The given path should be an absolute local path. A source path
-// has all symlinks evaluated that appear before the last path separator ("/"
-// on Unix). As it is to be a copy source, the path must exist.
-func CopyInfoSourcePath(path string) (CopyInfo, error) {
-	// Split the given path into its Directory and Base components. We will
-	// evaluate symlinks in the directory component then append the base.
-	path = normalizePath(path)
-	dirPath, basePath := filepath.Split(path)
-
-	resolvedDirPath, err := filepath.EvalSymlinks(dirPath)
-	if err != nil {
-		return CopyInfo{}, err
-	}
-
-	// resolvedDirPath will have been cleaned (no trailing path separators) so
-	// we can manually join it with the base path element.
-	resolvedPath := resolvedDirPath + string(filepath.Separator) + basePath
-
-	var rebaseName string
-	if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
-		// In the case where the path had a trailing separator and a symlink
-		// evaluation has changed the last path component, we will need to
-		// rebase the name in the archive that is being copied to match the
-		// originally requested name.
-		rebaseName = filepath.Base(path)
-	}
-
-	stat, err := os.Lstat(resolvedPath)
-	if err != nil {
-		return CopyInfo{}, err
-	}
-
-	return CopyInfo{
-		Path:       resolvedPath,
-		Exists:     true,
-		IsDir:      stat.IsDir(),
-		RebaseName: rebaseName,
-	}, nil
-}
-
-// CopyInfoDestinationPath stats the given path to create a CopyInfo
-// struct representing that resource for the destination of an archive copy
-// operation. The given path should be an absolute local path.
-func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
-	maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
-	path = normalizePath(path)
-	originalPath := path
-
-	stat, err := os.Lstat(path)
-
-	if err == nil && stat.Mode()&os.ModeSymlink == 0 {
-		// The path exists and is not a symlink.
- return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil - } - - // While the path is a symlink. - for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { - if n > maxSymlinkIter { - // Don't follow symlinks more than this arbitrary number of times. - return CopyInfo{}, errors.New("too many symlinks in " + originalPath) - } - - // The path is a symbolic link. We need to evaluate it so that the - // destination of the copy operation is the link target and not the - // link itself. This is notably different than CopyInfoSourcePath which - // only evaluates symlinks before the last appearing path separator. - // Also note that it is okay if the last path element is a broken - // symlink as the copy operation should create the target. - var linkTarget string - - linkTarget, err = os.Readlink(path) - if err != nil { - return CopyInfo{}, err - } - - if !system.IsAbs(linkTarget) { - // Join with the parent directory. - dstParent, _ := SplitPathDirEntry(path) - linkTarget = filepath.Join(dstParent, linkTarget) - } - - path = linkTarget - stat, err = os.Lstat(path) - } - - if err != nil { - // It's okay if the destination path doesn't exist. We can still - // continue the copy operation if the parent directory exists. - if !os.IsNotExist(err) { - return CopyInfo{}, err - } - - // Ensure destination parent dir exists. - dstParent, _ := SplitPathDirEntry(path) - - parentDirStat, err := os.Lstat(dstParent) - if err != nil { - return CopyInfo{}, err - } - if !parentDirStat.IsDir() { - return CopyInfo{}, ErrNotDirectory - } - - return CopyInfo{Path: path}, nil - } - - // The path exists after resolving symlinks. - return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil -} - -// PrepareArchiveCopy prepares the given srcContent archive, which should -// contain the archived resource described by srcInfo, to the destination -// described by dstInfo. Returns the possibly modified content archive along -// with the path to the destination directory which it should be extracted to. -func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) { - // Ensure in platform semantics - srcInfo.Path = normalizePath(srcInfo.Path) - dstInfo.Path = normalizePath(dstInfo.Path) - - // Separate the destination path between its directory and base - // components in case the source archive contents need to be rebased. - dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) - _, srcBase := SplitPathDirEntry(srcInfo.Path) - - switch { - case dstInfo.Exists && dstInfo.IsDir: - // The destination exists as a directory. No alteration - // to srcContent is needed as its contents can be - // simply extracted to the destination directory. - return dstInfo.Path, ioutil.NopCloser(srcContent), nil - case dstInfo.Exists && srcInfo.IsDir: - // The destination exists as some type of file and the source - // content is a directory. This is an error condition since - // you cannot copy a directory to an existing file location. - return "", nil, ErrCannotCopyDir - case dstInfo.Exists: - // The destination exists as some type of file and the source content - // is also a file. The source content entry will have to be renamed to - // have a basename which matches the destination path's basename. - return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case srcInfo.IsDir: - // The destination does not exist and the source content is an archive - // of a directory. 
The archive should be extracted to the parent of - // the destination path instead, and when it is, the directory that is - // created as a result should take the name of the destination path. - // The source content entries will have to be renamed to have a - // basename which matches the destination path's basename. - return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case assertsDirectory(dstInfo.Path): - // The destination does not exist and is asserted to be created as a - // directory, but the source content is not a directory. This is an - // error condition since you cannot create a directory from a file - // source. - return "", nil, ErrDirNotExists - default: - // The last remaining case is when the destination does not exist, is - // not asserted to be a directory, and the source content is not an - // archive of a directory. In this case, the destination file will need - // to be created when the archive is extracted and the source content - // entry will have to be renamed to have a basename which matches the - // destination path's basename. - return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil - } - -} - -// rebaseArchiveEntries rewrites the given srcContent archive replacing -// an occurrence of oldBase with newBase at the beginning of entry names. -func rebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive { - if oldBase == string(os.PathSeparator) { - // If oldBase specifies the root directory, use an empty string as - // oldBase instead so that newBase doesn't replace the path separator - // that all paths will start with. - oldBase = "" - } - - rebased, w := io.Pipe() - - go func() { - srcTar := tar.NewReader(srcContent) - rebasedTar := tar.NewWriter(w) - - for { - hdr, err := srcTar.Next() - if err == io.EOF { - // Signals end of archive. - rebasedTar.Close() - w.Close() - return - } - if err != nil { - w.CloseWithError(err) - return - } - - hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) - - if err = rebasedTar.WriteHeader(hdr); err != nil { - w.CloseWithError(err) - return - } - - if _, err = io.Copy(rebasedTar, srcTar); err != nil { - w.CloseWithError(err) - return - } - } - }() - - return rebased -} - -// CopyResource performs an archive copy from the given source path to the -// given destination path. The source path MUST exist and the destination -// path's parent directory must exist. -func CopyResource(srcPath, dstPath string) error { - var ( - srcInfo CopyInfo - err error - ) - - // Ensure in platform semantics - srcPath = normalizePath(srcPath) - dstPath = normalizePath(dstPath) - - // Clean the source and destination paths. - srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) - dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) - - if srcInfo, err = CopyInfoSourcePath(srcPath); err != nil { - return err - } - - content, err := TarResource(srcInfo) - if err != nil { - return err - } - defer content.Close() - - return CopyTo(content, srcInfo, dstPath) -} - -// CopyTo handles extracting the given content whose -// entries should be sourced from srcInfo to dstPath. -func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error { - // The destination path need not exist, but CopyInfoDestinationPath will - // ensure that at least the parent directory exists.
- dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) - if err != nil { - return err - } - - dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) - if err != nil { - return err - } - defer copyArchive.Close() - - options := &TarOptions{ - NoLchown: true, - NoOverwriteDirNonDir: true, - } - - return Untar(copyArchive, dstDir, options) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go deleted file mode 100644 index e305b5e4a..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package archive - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.ToSlash(path) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go deleted file mode 100644 index 2b775b45c..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -package archive - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.FromSlash(path) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go deleted file mode 100644 index 5ec71110a..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go +++ /dev/null @@ -1,264 +0,0 @@ -package archive - -import ( - "archive/tar" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -// UnpackLayer unpacks `layer` to `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. -func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) { - tr := tar.NewReader(layer) - trBuf := pools.BufioReader32KPool.Get(tr) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return 0, err - } - - aufsTempdir := "" - aufsHardlinks := make(map[string]*tar.Header) - - // Iterate through the files in the archive.
- for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return 0, err - } - - size += hdr.Size - - // Normalize name, for safety and for a simple is-root check - hdr.Name = filepath.Clean(hdr.Name) - - // Windows does not support filenames with colons in them. Ignore - // these files. This is not a problem though (although it might - // appear that it is). Let's suppose a client is running docker pull. - // The daemon it points to is Windows. Would it make sense for the - // client to be doing a docker pull Ubuntu for example (which has files - // with colons in the name under /usr/share/man/man3)? No, absolutely - // not as it would really only make sense that they were pulling a - // Windows image. However, for development, it is necessary to be able - // to pull Linux images which are in the repository. - // - // TODO Windows. Once the registry is aware of what images are Windows- - // specific or Linux-specific, this warning should be changed to an error - // to cater for the situation where someone does manage to upload a Linux - // image but have it tagged as Windows inadvertently. - if runtime.GOOS == "windows" { - if strings.Contains(hdr.Name, ":") { - logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) - continue - } - } - - // Note as these operations are platform specific, so must the slash be. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists. - // This happened in some tests where an image had a tarfile without any - // parent directories. - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = system.MkdirAll(parentPath, 0600) - if err != nil { - return 0, err - } - } - } - - // Skip AUFS metadata dirs - if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { - // Regular files inside /.wh..wh.plnk can be used as hardlink targets - // We don't want this directory, but we need the files in them so that - // such hardlinks can be resolved. - if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { - basename := filepath.Base(hdr.Name) - aufsHardlinks[basename] = hdr - if aufsTempdir == "" { - if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { - return 0, err - } - defer os.RemoveAll(aufsTempdir) - } - if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil { - return 0, err - } - } - - if hdr.Name != WhiteoutOpaqueDir { - continue - } - } - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return 0, err - } - - // Note as these operations are platform specific, so must the slash be. 
- if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - base := filepath.Base(path) - - if strings.HasPrefix(base, WhiteoutPrefix) { - dir := filepath.Dir(path) - if base == WhiteoutOpaqueDir { - fi, err := os.Lstat(dir) - if err != nil && !os.IsNotExist(err) { - return 0, err - } - if err := os.RemoveAll(dir); err != nil { - return 0, err - } - if err := os.Mkdir(dir, fi.Mode()&os.ModePerm); err != nil { - return 0, err - } - } else { - originalBase := base[len(WhiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - if err := os.RemoveAll(originalPath); err != nil { - return 0, err - } - } - } else { - // If path exists we almost always just want to remove and replace it. - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). - if fi, err := os.Lstat(path); err == nil { - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return 0, err - } - } - } - - trBuf.Reset(tr) - srcData := io.Reader(trBuf) - srcHdr := hdr - - // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so - // we manually retarget these into the temporary files we extracted them into - if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { - linkBasename := filepath.Base(hdr.Linkname) - srcHdr = aufsHardlinks[linkBasename] - if srcHdr == nil { - return 0, fmt.Errorf("Invalid aufs hardlink") - } - tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) - if err != nil { - return 0, err - } - defer tmpFile.Close() - srcData = tmpFile - } - - // if the options contain uid & gid maps, convert header uid/gid - // entries using the maps such that lchown sets the proper mapped - // uid/gid after writing the file. We only perform this mapping if - // the file isn't already owned by the remapped root UID or GID, as - // that specific uid/gid has no mapping from container -> host, and - // those files already have the proper ownership for inside the - // container. - if srcHdr.Uid != remappedRootUID { - xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps) - if err != nil { - return 0, err - } - srcHdr.Uid = xUID - } - if srcHdr.Gid != remappedRootGID { - xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps) - if err != nil { - return 0, err - } - srcHdr.Gid = xGID - } - if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil { - return 0, err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - } - } - - for _, hdr := range dirs { - path := filepath.Join(dest, hdr.Name) - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return 0, err - } - } - - return size, nil -} - -// ApplyLayer parses a diff in the standard layer format from `layer`, -// and applies it to the directory `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyLayer(dest string, layer Reader) (int64, error) { - return applyLayerHandler(dest, layer, &TarOptions{}, true) -} - -// ApplyUncompressedLayer parses a diff in the standard layer format from -// `layer`, and applies it to the directory `dest`.
The stream `layer` -// can only be uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) { - return applyLayerHandler(dest, layer, options, false) -} - -// do the bulk load of ApplyLayer, but allow for not calling DecompressStream -func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) { - dest = filepath.Clean(dest) - - // We need to be able to set any perms - oldmask, err := system.Umask(0) - if err != nil { - return 0, err - } - defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform - - if decompress { - layer, err = DecompressStream(layer) - if err != nil { - return 0, err - } - } - return UnpackLayer(dest, layer, options) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go deleted file mode 100644 index a5e08e4ee..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build ignore - -// Simple tool to create an archive stream from an old and new directory -// -// By default it will stream the comparison of two temporary directories with junk files -package main - -import ( - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "path" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive" -) - -var ( - flDebug = flag.Bool("D", false, "debugging output") - flNewDir = flag.String("newdir", "", "") - flOldDir = flag.String("olddir", "", "") - log = logrus.New() -) - -func main() { - flag.Usage = func() { - fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") - fmt.Printf("%s [OPTIONS]\n", os.Args[0]) - flag.PrintDefaults() - } - flag.Parse() - log.Out = os.Stderr - if (len(os.Getenv("DEBUG")) > 0) || *flDebug { - logrus.SetLevel(logrus.DebugLevel) - } - var newDir, oldDir string - - if len(*flNewDir) == 0 { - var err error - newDir, err = ioutil.TempDir("", "docker-test-newDir") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(newDir) - if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { - log.Fatal(err) - } - } else { - newDir = *flNewDir - } - - if len(*flOldDir) == 0 { - var err error - oldDir, err = ioutil.TempDir("", "docker-test-oldDir") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(oldDir) - } else { - oldDir = *flOldDir - } - - changes, err := archive.ChangesDirs(newDir, oldDir) - if err != nil { - log.Fatal(err) - } - - a, err := archive.ExportChanges(newDir, changes) - if err != nil { - log.Fatal(err) - } - defer a.Close() - - i, err := io.Copy(os.Stdout, a) - if err != nil && err != io.EOF { - log.Fatal(err) - } - fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) -} - -func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { - fileData := []byte("fooo") - for n := 0; n < numberOfFiles; n++ { - fileName := fmt.Sprintf("file-%d", n) - if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { - return 0, err - } - if makeLinks { - if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { - return 0, err - } - } - } - totalSize := numberOfFiles * len(fileData) - return totalSize, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go deleted file mode 100644 index 3448569b1..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go +++ /dev/null @@ -1,16 +0,0 @@ -package archive - -import ( - "syscall" - "time" -) - -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - if time.IsZero() { - // Return UTIME_OMIT special value - ts.Sec = 0 - ts.Nsec = ((1 << 30) - 2) - return - } - return syscall.NsecToTimespec(time.UnixNano()) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go deleted file mode 100644 index e85aac054..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !linux - -package archive - -import ( - "syscall" - "time" -) - -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - nsec := int64(0) - if !time.IsZero() { - nsec = time.UnixNano() - } - return syscall.NsecToTimespec(nsec) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/whiteouts.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/whiteouts.go deleted file mode 100644 index 3d9c31321..000000000 ---
a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/whiteouts.go +++ /dev/null @@ -1,23 +0,0 @@ -package archive - -// Whiteouts are files with a special meaning for the layered filesystem. -// Docker uses AUFS whiteout files inside exported archives. In other -// filesystems these files are generated/handled on tar creation/extraction. - -// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a -// filename this means that file has been removed from the base layer. -const WhiteoutPrefix = ".wh." - -// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not -// for removing an actual file. Normally these files are excluded from exported -// archives. -const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix - -// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other -// layers. Normally these should not go into exported archives and all changed -// hardlinks should be copied to the top layer. -const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" - -// WhiteoutOpaqueDir file means directory has been made opaque - meaning -// readdir calls to this directory do not follow to lower layers. -const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go deleted file mode 100644 index dfb335c0b..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go +++ /dev/null @@ -1,59 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "io/ioutil" -) - -// Generate generates a new archive from the content provided -// as input. -// -// `files` is a sequence of path/content pairs. A new file is -// added to the archive for each pair. -// If the last pair is incomplete, the file is created with an -// empty content. For example: -// -// Generate("foo.txt", "hello world", "emptyfile") -// -// The above call will return an archive with 2 files: -// * ./foo.txt with content "hello world" -// * ./emptyfile with empty content -// -// FIXME: stream content instead of buffering -// FIXME: specify permissions and other archive metadata -func Generate(input ...string) (Archive, error) { - files := parseStringPairs(input...)
- buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) - for _, file := range files { - name, content := file[0], file[1] - hdr := &tar.Header{ - Name: name, - Size: int64(len(content)), - } - if err := tw.WriteHeader(hdr); err != nil { - return nil, err - } - if _, err := tw.Write([]byte(content)); err != nil { - return nil, err - } - } - if err := tw.Close(); err != nil { - return nil, err - } - return ioutil.NopCloser(buf), nil -} - -func parseStringPairs(input ...string) (output [][2]string) { - output = make([][2]string, 0, len(input)/2+1) - for i := 0; i < len(input); i += 2 { - var pair [2]string - pair[0] = input[i] - if i+1 < len(input) { - pair[1] = input[i+1] - } - output = append(output, pair) - } - return -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go deleted file mode 100644 index 5559732a0..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go +++ /dev/null @@ -1,184 +0,0 @@ -package fileutils - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" -) - -// exclusion returns true if the specified pattern is an exclusion -func exclusion(pattern string) bool { - return pattern[0] == '!' -} - -// empty returns true if the specified pattern is empty -func empty(pattern string) bool { - return pattern == "" -} - -// CleanPatterns takes a slice of patterns and returns a new -// slice of patterns cleaned with filepath.Clean, stripped -// of any empty patterns and lets the caller know whether the -// slice contains any exception patterns (prefixed with !). -func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) { - // Loop over exclusion patterns and: - // 1. Clean them up. - // 2. Indicate whether we are dealing with any exception rules. - // 3. Error if we see a single exclusion marker on its own (!). - cleanedPatterns := []string{} - patternDirs := [][]string{} - exceptions := false - for _, pattern := range patterns { - // Eliminate leading and trailing whitespace. - pattern = strings.TrimSpace(pattern) - if empty(pattern) { - continue - } - if exclusion(pattern) { - if len(pattern) == 1 { - return nil, nil, false, errors.New("Illegal exclusion pattern: !") - } - exceptions = true - } - pattern = filepath.Clean(pattern) - cleanedPatterns = append(cleanedPatterns, pattern) - if exclusion(pattern) { - pattern = pattern[1:] - } - patternDirs = append(patternDirs, strings.Split(pattern, "/")) - } - - return cleanedPatterns, patternDirs, exceptions, nil -} - -// Matches returns true if file matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -func Matches(file string, patterns []string) (bool, error) { - file = filepath.Clean(file) - - if file == "." { - // Don't let them exclude everything, kind of silly. - return false, nil - } - - patterns, patDirs, _, err := CleanPatterns(patterns) - if err != nil { - return false, err - } - - return OptimizedMatches(file, patterns, patDirs) -} - -// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. -// It will assume that the inputs have been preprocessed and therefore the function -// doesn't need to do as much error checking and clean-up.
This was done to avoid -// repeating these steps on each file being checked during the archive process. -// The more generic fileutils.Matches() can't make these assumptions. -func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { - matched := false - parentPath := filepath.Dir(file) - parentPathDirs := strings.Split(parentPath, "/") - - for i, pattern := range patterns { - negative := false - - if exclusion(pattern) { - negative = true - pattern = pattern[1:] - } - - match, err := filepath.Match(pattern, file) - if err != nil { - return false, err - } - - if !match && parentPath != "." { - // Check to see if the pattern matches one of our parent dirs. - if len(patDirs[i]) <= len(parentPathDirs) { - match, _ = filepath.Match(strings.Join(patDirs[i], "/"), - strings.Join(parentPathDirs[:len(patDirs[i])], "/")) - } - } - - if match { - matched = !negative - } - } - - if matched { - logrus.Debugf("Skipping excluded path: %s", file) - } - - return matched, nil -} - -// CopyFile copies from src to dst until either EOF is reached -// on src or an error occurs. It verifies src exists and removes -// the dst if it exists. -func CopyFile(src, dst string) (int64, error) { - cleanSrc := filepath.Clean(src) - cleanDst := filepath.Clean(dst) - if cleanSrc == cleanDst { - return 0, nil - } - sf, err := os.Open(cleanSrc) - if err != nil { - return 0, err - } - defer sf.Close() - if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { - return 0, err - } - df, err := os.Create(cleanDst) - if err != nil { - return 0, err - } - defer df.Close() - return io.Copy(df, sf) -} - -// ReadSymlinkedDirectory returns the target directory of a symlink. -// The target of the symbolic link may not be a file. -func ReadSymlinkedDirectory(path string) (string, error) { - var realPath string - var err error - if realPath, err = filepath.Abs(path); err != nil { - return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) - } - if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) - } - realPathInfo, err := os.Stat(realPath) - if err != nil { - return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) - } - if !realPathInfo.Mode().IsDir() { - return "", fmt.Errorf("canonical path points to a file '%s'", realPath) - } - return realPath, nil -} - -// CreateIfNotExists creates a file or a directory only if it does not already exist.
-func CreateIfNotExists(path string, isDir bool) error { - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - if isDir { - return os.MkdirAll(path, 0755) - } - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } - f, err := os.OpenFile(path, os.O_CREATE, 0755) - if err != nil { - return err - } - f.Close() - } - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_unix.go deleted file mode 100644 index 7e00802c1..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_unix.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build linux freebsd - -package fileutils - -import ( - "fmt" - "io/ioutil" - "os" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" -) - -// GetTotalUsedFds Returns the number of used File Descriptors by -// reading it via /proc filesystem. -func GetTotalUsedFds() int { - if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { - logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) - } else { - return len(fds) - } - return -1 -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_windows.go deleted file mode 100644 index 5ec21cace..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package fileutils - -// GetTotalUsedFds Returns the number of used File Descriptors. Not supported -// on Windows. -func GetTotalUsedFds() int { - return -1 -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go deleted file mode 100644 index dcae17882..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go +++ /dev/null @@ -1,39 +0,0 @@ -package homedir - -import ( - "os" - "runtime" - - "github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - if runtime.GOOS == "windows" { - return "USERPROFILE" - } - return "HOME" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -func Get() string { - home := os.Getenv(Key()) - if home == "" && runtime.GOOS != "windows" { - if u, err := user.CurrentUser(); err == nil { - return u.Home - } - } - return home -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. 
-func GetShortcutString() string { - if runtime.GOOS == "windows" { - return "%USERPROFILE%" // be careful while using in format functions - } - return "~" -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools.go deleted file mode 100644 index a1301ee97..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools.go +++ /dev/null @@ -1,195 +0,0 @@ -package idtools - -import ( - "bufio" - "fmt" - "os" - "sort" - "strconv" - "strings" -) - -// IDMap contains a single entry for user namespace range remapping. An array -// of IDMap entries represents the structure that will be provided to the Linux -// kernel for creating a user namespace. -type IDMap struct { - ContainerID int `json:"container_id"` - HostID int `json:"host_id"` - Size int `json:"size"` -} - -type subIDRange struct { - Start int - Length int -} - -type ranges []subIDRange - -func (e ranges) Len() int { return len(e) } -func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } -func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } - -const ( - subuidFileName string = "/etc/subuid" - subgidFileName string = "/etc/subgid" -) - -// MkdirAllAs creates a directory (include any along the path) and then modifies -// ownership to the requested uid/gid. If the directory already exists, this -// function will still change ownership to the requested uid/gid pair. -func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, true, true) -} - -// MkdirAllNewAs creates a directory (include any along the path) and then modifies -// ownership ONLY of newly created directories to the requested uid/gid. If the -// directories along the path exist, no change of ownership will be performed -func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, true, false) -} - -// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. -// If the directory already exists, this function still changes ownership -func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, false, true) -} - -// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. -// If the maps are empty, then the root uid/gid will default to "real" 0/0 -func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { - var uid, gid int - - if uidMap != nil { - xUID, err := ToHost(0, uidMap) - if err != nil { - return -1, -1, err - } - uid = xUID - } - if gidMap != nil { - xGID, err := ToHost(0, gidMap) - if err != nil { - return -1, -1, err - } - gid = xGID - } - return uid, gid, nil -} - -// ToContainer takes an id mapping, and uses it to translate a -// host ID to the remapped ID. 
If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id -func ToContainer(hostID int, idMap []IDMap) (int, error) { - if idMap == nil { - return hostID, nil - } - for _, m := range idMap { - if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { - contID := m.ContainerID + (hostID - m.HostID) - return contID, nil - } - } - return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) -} - -// ToHost takes an id mapping and a remapped ID, and translates the -// ID to the mapped host ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id # -func ToHost(contID int, idMap []IDMap) (int, error) { - if idMap == nil { - return contID, nil - } - for _, m := range idMap { - if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { - hostID := m.HostID + (contID - m.ContainerID) - return hostID, nil - } - } - return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) -} - -// CreateIDMappings takes a requested user and group name and -// using the data from /etc/sub{uid,gid} ranges, creates the -// proper uid and gid remapping ranges for that user/group pair -func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) { - subuidRanges, err := parseSubuid(username) - if err != nil { - return nil, nil, err - } - subgidRanges, err := parseSubgid(groupname) - if err != nil { - return nil, nil, err - } - if len(subuidRanges) == 0 { - return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username) - } - if len(subgidRanges) == 0 { - return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname) - } - - return createIDMap(subuidRanges), createIDMap(subgidRanges), nil -} - -func createIDMap(subidRanges ranges) []IDMap { - idMap := []IDMap{} - - // sort the ranges by lowest ID first - sort.Sort(subidRanges) - containerID := 0 - for _, idrange := range subidRanges { - idMap = append(idMap, IDMap{ - ContainerID: containerID, - HostID: idrange.Start, - Size: idrange.Length, - }) - containerID = containerID + idrange.Length - } - return idMap -} - -func parseSubuid(username string) (ranges, error) { - return parseSubidFile(subuidFileName, username) -} - -func parseSubgid(username string) (ranges, error) { - return parseSubidFile(subgidFileName, username) -} - -func parseSubidFile(path, username string) (ranges, error) { - var rangeList ranges - - subidFile, err := os.Open(path) - if err != nil { - return rangeList, err - } - defer subidFile.Close() - - s := bufio.NewScanner(subidFile) - for s.Scan() { - if err := s.Err(); err != nil { - return rangeList, err - } - - text := strings.TrimSpace(s.Text()) - if text == "" { - continue - } - parts := strings.Split(text, ":") - if len(parts) != 3 { - return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) - } - if parts[0] == username { - // return the first entry for a user; ignores potential for multiple ranges per user - startid, err := strconv.Atoi(parts[1]) - if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) - } - length, err := strconv.Atoi(parts[2]) - if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) - } - rangeList = append(rangeList, subIDRange{startid, length}) - } - } - return rangeList, nil -} diff --git 
a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_unix.go deleted file mode 100644 index 0444307d2..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ /dev/null @@ -1,60 +0,0 @@ -// +build !windows - -package idtools - -import ( - "os" - "path/filepath" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - // make an array containing the original path asked for, plus (for mkAll == true) - // all path components leading up to the complete path that don't exist before we MkdirAll - // so that we can chown all of them properly at the end. If chownExisting is false, we won't - // chown the full directory path if it exists - var paths []string - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { - paths = []string{path} - } else if err == nil && chownExisting { - if err := os.Chown(path, ownerUID, ownerGID); err != nil { - return err - } - // short-circuit--we were called with an existing directory and chown was requested - return nil - } else if err == nil { - // nothing to do; directory path fully exists already and chown was NOT requested - return nil - } - - if mkAll { - // walk back to "/" looking for directories which do not exist - // and add them to the paths array for chown after creation - dirPath := path - for { - dirPath = filepath.Dir(dirPath) - if dirPath == "/" { - break - } - if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { - paths = append(paths, dirPath) - } - } - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { - return err - } - } else { - if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { - return err - } - } - // even if it existed, we will chown the requested path + any subpaths that - // didn't exist when we called MkdirAll - for _, pathComponent := range paths { - if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { - return err - } - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_windows.go deleted file mode 100644 index d5ec992db..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build windows - -package idtools - -import ( - "os" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -// Platforms such as Windows do not support the UID/GID concept. So make this -// just a wrapper around system.MkdirAll. 
-func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { - return err - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go deleted file mode 100644 index c1eedff10..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go +++ /dev/null @@ -1,155 +0,0 @@ -package idtools - -import ( - "fmt" - "os/exec" - "path/filepath" - "strings" - "syscall" -) - -// add a user and/or group to Linux /etc/passwd, /etc/group using standard -// Linux distribution commands: -// adduser --uid --shell /bin/login --no-create-home --disabled-login --ingroup -// useradd -M -u -s /bin/nologin -N -g -// addgroup --gid -// groupadd -g - -const baseUID int = 10000 -const baseGID int = 10000 -const idMAX int = 65534 - -var ( - userCommand string - groupCommand string - - cmdTemplates = map[string]string{ - "adduser": "--uid %d --shell /bin/false --no-create-home --disabled-login --ingroup %s %s", - "useradd": "-M -u %d -s /bin/false -N -g %s %s", - "addgroup": "--gid %d %s", - "groupadd": "-g %d %s", - } -) - -func init() { - // set up which commands are used for adding users/groups dependent on distro - if _, err := resolveBinary("adduser"); err == nil { - userCommand = "adduser" - } else if _, err := resolveBinary("useradd"); err == nil { - userCommand = "useradd" - } - if _, err := resolveBinary("addgroup"); err == nil { - groupCommand = "addgroup" - } else if _, err := resolveBinary("groupadd"); err == nil { - groupCommand = "groupadd" - } -} - -func resolveBinary(binname string) (string, error) { - binaryPath, err := exec.LookPath(binname) - if err != nil { - return "", err - } - resolvedPath, err := filepath.EvalSymlinks(binaryPath) - if err != nil { - return "", err - } - //only return no error if the final resolved binary basename - //matches what was searched for - if filepath.Base(resolvedPath) == binname { - return resolvedPath, nil - } - return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) -} - -// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair -// and calls the appropriate helper function to add the group and then -// the user to the group in /etc/group and /etc/passwd respectively. -// This new user's /etc/sub{uid,gid} ranges will be used for user namespace -// mapping ranges in containers. 
-func AddNamespaceRangesUser(name string) (int, int, error) { - // Find unused uid, gid pair - uid, err := findUnusedUID(baseUID) - if err != nil { - return -1, -1, fmt.Errorf("Unable to find unused UID: %v", err) - } - gid, err := findUnusedGID(baseGID) - if err != nil { - return -1, -1, fmt.Errorf("Unable to find unused GID: %v", err) - } - - // First add the group that we will use - if err := addGroup(name, gid); err != nil { - return -1, -1, fmt.Errorf("Error adding group %q: %v", name, err) - } - // Add the user as a member of the group - if err := addUser(name, uid, name); err != nil { - return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) - } - return uid, gid, nil -} - -func addUser(userName string, uid int, groupName string) error { - - if userCommand == "" { - return fmt.Errorf("Cannot add user; no useradd/adduser binary found") - } - args := fmt.Sprintf(cmdTemplates[userCommand], uid, groupName, userName) - return execAddCmd(userCommand, args) -} - -func addGroup(groupName string, gid int) error { - - if groupCommand == "" { - return fmt.Errorf("Cannot add group; no groupadd/addgroup binary found") - } - args := fmt.Sprintf(cmdTemplates[groupCommand], gid, groupName) - // only error out if the error isn't that the group already exists - // if the group exists then our needs are already met - if err := execAddCmd(groupCommand, args); err != nil && !strings.Contains(err.Error(), "already exists") { - return err - } - return nil -} - -func execAddCmd(cmd, args string) error { - execCmd := exec.Command(cmd, strings.Split(args, " ")...) - out, err := execCmd.CombinedOutput() - if err != nil { - return fmt.Errorf("Failed to add user/group with error: %v; output: %q", err, string(out)) - } - return nil -} - -func findUnusedUID(startUID int) (int, error) { - return findUnused("passwd", startUID) -} - -func findUnusedGID(startGID int) (int, error) { - return findUnused("group", startGID) -} - -func findUnused(file string, id int) (int, error) { - for { - cmdStr := fmt.Sprintf("cat /etc/%s | cut -d: -f3 | grep '^%d$'", file, id) - cmd := exec.Command("sh", "-c", cmdStr) - if err := cmd.Run(); err != nil { - // if a non-zero return code occurs, then we know the ID was not found - // and is usable - if exiterr, ok := err.(*exec.ExitError); ok { - // The program has exited with an exit code != 0 - if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { - if status.ExitStatus() == 1 { - //no match, we can use this ID - return id, nil - } - } - } - return -1, fmt.Errorf("Error looking in /etc/%s for unused ID: %v", file, err) - } - id++ - if id > idMAX { - return -1, fmt.Errorf("Maximum id in %q reached with finding unused numeric ID", file) - } - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go deleted file mode 100644 index d98b354cb..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux - -package idtools - -import "fmt" - -// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair -// and calls the appropriate helper function to add the group and then -// the user to the group in /etc/group and /etc/passwd respectively. 
-func AddNamespaceRangesUser(name string) (int, int, error) { - return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe.go deleted file mode 100644 index 932e1d1bc..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe.go +++ /dev/null @@ -1,89 +0,0 @@ -package ioutils - -const maxCap = 1e6 - -// BytesPipe is io.ReadWriter which works similarly to pipe(queue). -// All written data could be read only once. Also BytesPipe is allocating -// and releasing new byte slices to adjust to current needs, so there won't be -// overgrown buffer after high load peak. -// BytesPipe isn't goroutine-safe, caller must synchronize it if needed. -type BytesPipe struct { - buf [][]byte // slice of byte-slices of buffered data - lastRead int // index in the first slice to a read point - bufLen int // length of data buffered over the slices -} - -// NewBytesPipe creates new BytesPipe, initialized by specified slice. -// If buf is nil, then it will be initialized with slice which cap is 64. -// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). -func NewBytesPipe(buf []byte) *BytesPipe { - if cap(buf) == 0 { - buf = make([]byte, 0, 64) - } - return &BytesPipe{ - buf: [][]byte{buf[:0]}, - } -} - -// Write writes p to BytesPipe. -// It can allocate new []byte slices in a process of writing. -func (bp *BytesPipe) Write(p []byte) (n int, err error) { - for { - // write data to the last buffer - b := bp.buf[len(bp.buf)-1] - // copy data to the current empty allocated area - n := copy(b[len(b):cap(b)], p) - // increment buffered data length - bp.bufLen += n - // include written data in last buffer - bp.buf[len(bp.buf)-1] = b[:len(b)+n] - - // if there was enough room to write all then break - if len(p) == n { - break - } - - // more data: write to the next slice - p = p[n:] - // allocate slice that has twice the size of the last unless maximum reached - nextCap := 2 * cap(bp.buf[len(bp.buf)-1]) - if maxCap < nextCap { - nextCap = maxCap - } - // add new byte slice to the buffers slice and continue writing - bp.buf = append(bp.buf, make([]byte, 0, nextCap)) - } - return -} - -func (bp *BytesPipe) len() int { - return bp.bufLen - bp.lastRead -} - -// Read reads bytes from BytesPipe. -// Data could be read only once. -func (bp *BytesPipe) Read(p []byte) (n int, err error) { - for { - read := copy(p, bp.buf[0][bp.lastRead:]) - n += read - bp.lastRead += read - if bp.len() == 0 { - // we have read everything. reset to the beginning. - bp.lastRead = 0 - bp.bufLen -= len(bp.buf[0]) - bp.buf[0] = bp.buf[0][:0] - break - } - // break if everything was read - if len(p) == read { - break - } - // more buffered data and more asked. read from next slice. 
- p = p[read:] - bp.lastRead = 0 - bp.bufLen -= len(bp.buf[0]) - bp.buf[0] = nil // throw away old slice - bp.buf = bp.buf[1:] // switch to next - } - return -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go deleted file mode 100644 index 0b04b0ba3..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go +++ /dev/null @@ -1,22 +0,0 @@ -package ioutils - -import ( - "fmt" - "io" -) - -// FprintfIfNotEmpty prints the string value if it's not empty -func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { - if value != "" { - return fmt.Fprintf(w, format, value) - } - return 0, nil -} - -// FprintfIfTrue prints the boolean value if it's true -func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) { - if ok { - return fmt.Fprintf(w, format, ok) - } - return 0, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go deleted file mode 100644 index f231aa9da..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go +++ /dev/null @@ -1,226 +0,0 @@ -package ioutils - -import ( - "bytes" - "fmt" - "io" - "os" -) - -type pos struct { - idx int - offset int64 -} - -type multiReadSeeker struct { - readers []io.ReadSeeker - pos *pos - posIdx map[io.ReadSeeker]int -} - -func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { - var tmpOffset int64 - switch whence { - case os.SEEK_SET: - for i, rdr := range r.readers { - // get size of the current reader - s, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - - if offset > tmpOffset+s { - if i == len(r.readers)-1 { - rdrOffset := s + (offset - tmpOffset) - if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { - return -1, err - } - r.pos = &pos{i, rdrOffset} - return offset, nil - } - - tmpOffset += s - continue - } - - rdrOffset := offset - tmpOffset - idx := i - - rdr.Seek(rdrOffset, os.SEEK_SET) - // make sure all following readers are at 0 - for _, rdr := range r.readers[i+1:] { - rdr.Seek(0, os.SEEK_SET) - } - - if rdrOffset == s && i != len(r.readers)-1 { - idx += 1 - rdrOffset = 0 - } - r.pos = &pos{idx, rdrOffset} - return offset, nil - } - case os.SEEK_END: - for _, rdr := range r.readers { - s, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - tmpOffset += s - } - r.Seek(tmpOffset+offset, os.SEEK_SET) - return tmpOffset + offset, nil - case os.SEEK_CUR: - if r.pos == nil { - return r.Seek(offset, os.SEEK_SET) - } - // Just return the current offset - if offset == 0 { - return r.getCurOffset() - } - - curOffset, err := r.getCurOffset() - if err != nil { - return -1, err - } - rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) - if err != nil { - return -1, err - } - - r.pos = &pos{r.posIdx[rdr], rdrOffset} - return curOffset + offset, nil - default: - return -1, fmt.Errorf("Invalid whence: %d", whence) - } - - return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) -} - -func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { - var rdr io.ReadSeeker - var 
rdrOffset int64 - - for i, rdr := range r.readers { - offsetTo, err := r.getOffsetToReader(rdr) - if err != nil { - return nil, -1, err - } - if offsetTo > offset { - rdr = r.readers[i-1] - rdrOffset = offsetTo - offset - break - } - - if rdr == r.readers[len(r.readers)-1] { - rdrOffset = offsetTo + offset - break - } - } - - return rdr, rdrOffset, nil -} - -func (r *multiReadSeeker) getCurOffset() (int64, error) { - var totalSize int64 - for _, rdr := range r.readers[:r.pos.idx+1] { - if r.posIdx[rdr] == r.pos.idx { - totalSize += r.pos.offset - break - } - - size, err := getReadSeekerSize(rdr) - if err != nil { - return -1, fmt.Errorf("error getting seeker size: %v", err) - } - totalSize += size - } - return totalSize, nil -} - -func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { - var offset int64 - for _, r := range r.readers { - if r == rdr { - break - } - - size, err := getReadSeekerSize(rdr) - if err != nil { - return -1, err - } - offset += size - } - return offset, nil -} - -func (r *multiReadSeeker) Read(b []byte) (int, error) { - if r.pos == nil { - r.pos = &pos{0, 0} - } - - bCap := int64(cap(b)) - buf := bytes.NewBuffer(nil) - var rdr io.ReadSeeker - - for _, rdr = range r.readers[r.pos.idx:] { - readBytes, err := io.CopyN(buf, rdr, bCap) - if err != nil && err != io.EOF { - return -1, err - } - bCap -= readBytes - - if bCap == 0 { - break - } - } - - rdrPos, err := rdr.Seek(0, os.SEEK_CUR) - if err != nil { - return -1, err - } - r.pos = &pos{r.posIdx[rdr], rdrPos} - return buf.Read(b) -} - -func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { - // save the current position - pos, err := rdr.Seek(0, os.SEEK_CUR) - if err != nil { - return -1, err - } - - // get the size - size, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - - // reset the position - if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { - return -1, err - } - return size, nil -} - -// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided -// input readseekers. After calling this method the initial position is set to the -// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances -// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. -// Seek can be used over the sum of lengths of all readseekers. -// -// When a MultiReadSeeker is used, no Read and Seek operations should be made on -// its ReadSeeker components. Also, users should make no assumption on the state -// of individual readseekers while the MultiReadSeeker is used. 
-func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { - if len(readers) == 1 { - return readers[0] - } - idx := make(map[io.ReadSeeker]int) - for i, rdr := range readers { - idx[rdr] = i - } - return &multiReadSeeker{ - readers: readers, - posIdx: idx, - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go deleted file mode 100644 index 54dd312bb..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go +++ /dev/null @@ -1,170 +0,0 @@ -package ioutils - -import ( - "crypto/sha256" - "encoding/hex" - "io" - "sync" -) - -type readCloserWrapper struct { - io.Reader - closer func() error -} - -func (r *readCloserWrapper) Close() error { - return r.closer() -} - -// NewReadCloserWrapper returns a new io.ReadCloser. -func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { - return &readCloserWrapper{ - Reader: r, - closer: closer, - } -} - -type readerErrWrapper struct { - reader io.Reader - closer func() -} - -func (r *readerErrWrapper) Read(p []byte) (int, error) { - n, err := r.reader.Read(p) - if err != nil { - r.closer() - } - return n, err -} - -// NewReaderErrWrapper returns a new io.Reader. -func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { - return &readerErrWrapper{ - reader: r, - closer: closer, - } -} - -// bufReader allows the underlying reader to continue to produce -// output by pre-emptively reading from the wrapped reader. -// This is achieved by buffering this data in bufReader's -// expanding buffer. -type bufReader struct { - sync.Mutex - buf io.ReadWriter - reader io.Reader - err error - wait sync.Cond - drainBuf []byte -} - -// NewBufReader returns a new bufReader. -func NewBufReader(r io.Reader) io.ReadCloser { - reader := &bufReader{ - buf: NewBytesPipe(nil), - reader: r, - drainBuf: make([]byte, 1024), - } - reader.wait.L = &reader.Mutex - go reader.drain() - return reader -} - -// NewBufReaderWithDrainbufAndBuffer returns a BufReader with drainBuffer and buffer. -func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer io.ReadWriter) io.ReadCloser { - reader := &bufReader{ - buf: buffer, - drainBuf: drainBuffer, - reader: r, - } - reader.wait.L = &reader.Mutex - go reader.drain() - return reader -} - -func (r *bufReader) drain() { - for { - //Call to scheduler is made to yield from this goroutine. - //This avoids goroutine looping here when n=0,err=nil, fixes code hangs when run with GCC Go. - callSchedulerIfNecessary() - n, err := r.reader.Read(r.drainBuf) - r.Lock() - if err != nil { - r.err = err - } else { - if n == 0 { - // nothing written, no need to signal - r.Unlock() - continue - } - r.buf.Write(r.drainBuf[:n]) - } - r.wait.Signal() - r.Unlock() - if err != nil { - break - } - } -} - -func (r *bufReader) Read(p []byte) (n int, err error) { - r.Lock() - defer r.Unlock() - for { - n, err = r.buf.Read(p) - if n > 0 { - return n, err - } - if r.err != nil { - return 0, r.err - } - r.wait.Wait() - } -} - -// Close closes the bufReader -func (r *bufReader) Close() error { - closer, ok := r.reader.(io.ReadCloser) - if !ok { - return nil - } - return closer.Close() -} - -// HashData returns the sha256 sum of src. 
-func HashData(src io.Reader) (string, error) { - h := sha256.New() - if _, err := io.Copy(h, src); err != nil { - return "", err - } - return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil -} - -// OnEOFReader wraps an io.ReadCloser and a function; -// the function will run at end of file or when the reader is closed. -type OnEOFReader struct { - Rc io.ReadCloser - Fn func() -} - -func (r *OnEOFReader) Read(p []byte) (n int, err error) { - n, err = r.Rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -// Close closes the file and runs the function. -func (r *OnEOFReader) Close() error { - err := r.Rc.Close() - r.runFunc() - return err -} - -func (r *OnEOFReader) runFunc() { - if fn := r.Fn; fn != nil { - fn() - r.Fn = nil - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go deleted file mode 100644 index 3c88f29e3..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !gccgo - -package ioutils - -func callSchedulerIfNecessary() { -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go deleted file mode 100644 index c11d02b94..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build gccgo - -package ioutils - -import ( - "runtime" -) - -func callSchedulerIfNecessary() { - // Allow or force the Go scheduler to switch context; without explicitly - // forcing this, the drain loop hangs under the gccgo implementation. - runtime.Gosched() -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_unix.go deleted file mode 100644 index 1539ad21b..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_unix.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !windows - -package ioutils - -import "io/ioutil" - -// TempDir on Unix systems is equivalent to ioutil.TempDir. -func TempDir(dir, prefix string) (string, error) { - return ioutil.TempDir(dir, prefix) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_windows.go deleted file mode 100644 index 72c0bc597..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build windows - -package ioutils - -import ( - "io/ioutil" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath" -) - -// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
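For reference, HashData above streams the reader through SHA-256 and prefixes the hex digest. A minimal sketch, assuming the vendored import path:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
)

func main() {
	// HashData drains the reader and returns "sha256:" + the hex digest.
	sum, err := ioutils.HashData(strings.NewReader("some payload"))
	if err != nil {
		panic(err)
	}
	fmt.Println(sum) // "sha256:" followed by 64 hex characters
}
```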
-func TempDir(dir, prefix string) (string, error) { - tempDir, err := ioutil.TempDir(dir, prefix) - if err != nil { - return "", err - } - return longpath.AddPrefix(tempDir), nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go deleted file mode 100644 index 25095474d..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go +++ /dev/null @@ -1,47 +0,0 @@ -package ioutils - -import ( - "io" - "net/http" - "sync" -) - -type WriteFlusher struct { - sync.Mutex - w io.Writer - flusher http.Flusher - flushed bool -} - -func (wf *WriteFlusher) Write(b []byte) (n int, err error) { - wf.Lock() - defer wf.Unlock() - n, err = wf.w.Write(b) - wf.flushed = true - wf.flusher.Flush() - return n, err -} - -// Flush the stream immediately. -func (wf *WriteFlusher) Flush() { - wf.Lock() - defer wf.Unlock() - wf.flushed = true - wf.flusher.Flush() -} - -func (wf *WriteFlusher) Flushed() bool { - wf.Lock() - defer wf.Unlock() - return wf.flushed -} - -func NewWriteFlusher(w io.Writer) *WriteFlusher { - var flusher http.Flusher - if f, ok := w.(http.Flusher); ok { - flusher = f - } else { - flusher = &NopFlusher{} - } - return &WriteFlusher{w: w, flusher: flusher} -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go deleted file mode 100644 index 43fdc44ea..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go +++ /dev/null @@ -1,60 +0,0 @@ -package ioutils - -import "io" - -type NopWriter struct{} - -func (*NopWriter) Write(buf []byte) (int, error) { - return len(buf), nil -} - -type nopWriteCloser struct { - io.Writer -} - -func (w *nopWriteCloser) Close() error { return nil } - -func NopWriteCloser(w io.Writer) io.WriteCloser { - return &nopWriteCloser{w} -} - -type NopFlusher struct{} - -func (f *NopFlusher) Flush() {} - -type writeCloserWrapper struct { - io.Writer - closer func() error -} - -func (r *writeCloserWrapper) Close() error { - return r.closer() -} - -func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { - return &writeCloserWrapper{ - Writer: r, - closer: closer, - } -} - -// Wrap a concrete io.Writer and hold a count of the number -// of bytes written to the writer during a "session". 
-// This can be convenient when write return is masked -// (e.g., json.Encoder.Encode()) -type WriteCounter struct { - Count int64 - Writer io.Writer -} - -func NewWriteCounter(w io.Writer) *WriteCounter { - return &WriteCounter{ - Writer: w, - } -} - -func (wc *WriteCounter) Write(p []byte) (count int, err error) { - count, err = wc.Writer.Write(p) - wc.Count += int64(count) - return -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath/longpath.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath/longpath.go deleted file mode 100644 index 9b15bfff4..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath/longpath.go +++ /dev/null @@ -1,26 +0,0 @@ -// longpath introduces some constants and helper functions for handling long paths -// in Windows, which are expected to be prepended with `\\?\` and followed by either -// a drive letter, a UNC server\share, or a volume identifier. - -package longpath - -import ( - "strings" -) - -// Prefix is the longpath prefix for Windows file paths. -const Prefix = `\\?\` - -// AddPrefix will add the Windows long path prefix to the path provided if -// it does not already have it. -func AddPrefix(path string) string { - if !strings.HasPrefix(path, Prefix) { - if strings.HasPrefix(path, `\\`) { - // This is a UNC path, so we need to add 'UNC' to the path as well. - path = Prefix + `UNC` + path[1:] - } else { - path = Prefix + path - } - } - return path -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/LICENSE deleted file mode 100644 index ac74d8f04..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2014-2015 The Docker & Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
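To make the UNC special case in longpath.AddPrefix concrete, a small sketch (vendored import path assumed):

```go
package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath"
)

func main() {
	// A drive-letter path simply gains the \\?\ prefix.
	fmt.Println(longpath.AddPrefix(`C:\Temp\data`)) // \\?\C:\Temp\data

	// A UNC path (\\server\share) becomes \\?\UNC\server\share.
	fmt.Println(longpath.AddPrefix(`\\server\share\data`)) // \\?\UNC\server\share\data

	// An already-prefixed path is left unchanged.
	fmt.Println(longpath.AddPrefix(`\\?\C:\Temp\data`)) // \\?\C:\Temp\data
}
```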
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/README.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/README.md deleted file mode 100644 index da00efa33..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/README.md +++ /dev/null @@ -1,40 +0,0 @@ -Package mflag (aka multiple-flag) implements command-line flag parsing. -It's a **hacky** fork of the [official golang package](http://golang.org/pkg/flag/) - -It adds: - -* both short and long flag versions -`./example -s red` `./example --string blue` - -* multiple names for the same option -``` -$>./example -h -Usage of example: - -s, --string="": a simple string -``` - -___ -It is very flexible on purpose, so you can do things like: -``` -$>./example -h -Usage of example: - -s, -string, --string="": a simple string -``` - -Or: -``` -$>./example -h -Usage of example: - -oldflag, --newflag="": a simple string -``` - -You can also hide some flags from the usage, so if we want only `--newflag`: -``` -$>./example -h -Usage of example: - --newflag="": a simple string -$>./example -oldflag str -str -``` - -See [example.go](example/example.go) for more details. diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag.go deleted file mode 100644 index ebfa35010..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag.go +++ /dev/null @@ -1,1201 +0,0 @@ -// Copyright 2014-2015 The Docker & Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package mflag implements command-line flag parsing. -// -// Usage: -// -// Define flags using flag.String(), Bool(), Int(), etc. -// -// This declares an integer flag, -f or --flagname, stored in the pointer ip, with type *int. -// import flag "github.com/docker/docker/pkg/mflag" -// var ip = flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname") -// If you like, you can bind the flag to a variable using the Var() functions. -// var flagvar int -// func init() { -// // -flaghidden will work, but will be hidden from the usage -// flag.IntVar(&flagvar, []string{"f", "#flaghidden", "-flagname"}, 1234, "help message for flagname") -// } -// Or you can create custom flags that satisfy the Value interface (with -// pointer receivers) and couple them to flag parsing by -// flag.Var(&flagVal, []string{"name"}, "help message for flagname") -// For such flags, the default value is just the initial value of the variable. -// -// You can also add "deprecated" flags; they are still usable, but are not shown -// in the usage and will display a warning when you try to use them. A `#` before -// an option means this option is deprecated; if there is a following option -// without `#` ahead, then that's the replacement; if not, it will just be removed: -// var ip = flag.Int([]string{"#f", "#flagname", "-flagname"}, 1234, "help message for flagname") -// this will display: `Warning: '-f' is deprecated, it will be replaced by '--flagname' soon. See usage.` or -// this will display: `Warning: '-flagname' is deprecated, it will be replaced by '--flagname' soon.
See usage.` -// var ip = flag.Int([]string{"f", "#flagname"}, 1234, "help message for flagname") -// will display: `Warning: '-flagname' is deprecated, it will be removed soon. See usage.` -// so you can only use `-f`. -// -// You can also group one-letter flags; if you declare -// var v = flag.Bool([]string{"v", "-verbose"}, false, "help message for verbose") -// var s = flag.Bool([]string{"s", "-slow"}, false, "help message for slow") -// you will be able to use -vs or -sv -// -// After all flags are defined, call -// flag.Parse() -// to parse the command line into the defined flags. -// -// Flags may then be used directly. If you're using the flags themselves, -// they are all pointers; if you bind to variables, they're values. -// fmt.Println("ip has value ", *ip) -// fmt.Println("flagvar has value ", flagvar) -// -// After parsing, the arguments after the flags are available as the -// slice flag.Args() or individually as flag.Arg(i). -// The arguments are indexed from 0 through flag.NArg()-1. -// -// Command line flag syntax: -// -flag -// -flag=x -// -flag="x" -// -flag='x' -// -flag x // non-boolean flags only -// One or two minus signs may be used; they are equivalent. -// The last form is not permitted for boolean flags because the -// meaning of the command -// cmd -x * -// will change if there is a file called 0, false, etc. You must -// use the -flag=false form to turn off a boolean flag. -// -// Flag parsing stops just before the first non-flag argument -// ("-" is a non-flag argument) or after the terminator "--". -// -// Integer flags accept 1234, 0664, 0x1234 and may be negative. -// Boolean flags may be 1, 0, t, f, true, false, TRUE, FALSE, True, False. -// Duration flags accept any input valid for time.ParseDuration. -// -// The default set of command-line flags is controlled by -// top-level functions. The FlagSet type allows one to define -// independent sets of flags, such as to implement subcommands -// in a command-line interface. The methods of FlagSet are -// analogous to the top-level functions for the command-line -// flag set. - -package mflag - -import ( - "errors" - "fmt" - "io" - "os" - "runtime" - "sort" - "strconv" - "strings" - "text/tabwriter" - "time" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir" -) - -// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
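Putting the package doc comment above together, a minimal, hypothetical program using the vendored mflag (the flag names here are illustrative):

```go
package main

import (
	"fmt"

	flag "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag"
)

func main() {
	// -s and --string name the same flag; #str is a deprecated alias that
	// still parses but is hidden from the usage and triggers a warning.
	str := flag.String([]string{"s", "#str", "-string"}, "", "a simple string")

	// One-letter boolean flags like -v can be grouped on the command
	// line with other one-letter boolean flags (e.g. -vs).
	verbose := flag.Bool([]string{"v", "-verbose"}, false, "verbose output")

	flag.Parse()
	fmt.Println("string:", *str, "verbose:", *verbose, "args:", flag.Args())
}
```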
-var ErrHelp = errors.New("flag: help requested") - -// ErrRetry is the error returned if you need to try letter by letter -var ErrRetry = errors.New("flag: retry") - -// -- bool Value -type boolValue bool - -func newBoolValue(val bool, p *bool) *boolValue { - *p = val - return (*boolValue)(p) -} - -func (b *boolValue) Set(s string) error { - v, err := strconv.ParseBool(s) - *b = boolValue(v) - return err -} - -func (b *boolValue) Get() interface{} { return bool(*b) } - -func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } - -func (b *boolValue) IsBoolFlag() bool { return true } - -// optional interface to indicate boolean flags that can be -// supplied without "=value" text -type boolFlag interface { - Value - IsBoolFlag() bool -} - -// -- int Value -type intValue int - -func newIntValue(val int, p *int) *intValue { - *p = val - return (*intValue)(p) -} - -func (i *intValue) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = intValue(v) - return err -} - -func (i *intValue) Get() interface{} { return int(*i) } - -func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } - -// -- int64 Value -type int64Value int64 - -func newInt64Value(val int64, p *int64) *int64Value { - *p = val - return (*int64Value)(p) -} - -func (i *int64Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = int64Value(v) - return err -} - -func (i *int64Value) Get() interface{} { return int64(*i) } - -func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } - -// -- uint Value -type uintValue uint - -func newUintValue(val uint, p *uint) *uintValue { - *p = val - return (*uintValue)(p) -} - -func (i *uintValue) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uintValue(v) - return err -} - -func (i *uintValue) Get() interface{} { return uint(*i) } - -func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } - -// -- uint64 Value -type uint64Value uint64 - -func newUint64Value(val uint64, p *uint64) *uint64Value { - *p = val - return (*uint64Value)(p) -} - -func (i *uint64Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uint64Value(v) - return err -} - -func (i *uint64Value) Get() interface{} { return uint64(*i) } - -func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } - -// -- string Value -type stringValue string - -func newStringValue(val string, p *string) *stringValue { - *p = val - return (*stringValue)(p) -} - -func (s *stringValue) Set(val string) error { - *s = stringValue(val) - return nil -} - -func (s *stringValue) Get() interface{} { return string(*s) } - -func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } - -// -- float64 Value -type float64Value float64 - -func newFloat64Value(val float64, p *float64) *float64Value { - *p = val - return (*float64Value)(p) -} - -func (f *float64Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 64) - *f = float64Value(v) - return err -} - -func (f *float64Value) Get() interface{} { return float64(*f) } - -func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } - -// -- time.Duration Value -type durationValue time.Duration - -func newDurationValue(val time.Duration, p *time.Duration) *durationValue { - *p = val - return (*durationValue)(p) -} - -func (d *durationValue) Set(s string) error { - v, err := time.ParseDuration(s) - *d = durationValue(v) - return err -} - -func (d *durationValue) Get() interface{} { return time.Duration(*d) } - -func (d *durationValue) String() 
string { return (*time.Duration)(d).String() } - -// Value is the interface to the dynamic value stored in a flag. -// (The default value is represented as a string.) -// -// If a Value has an IsBoolFlag() bool method returning true, -// the command-line parser makes -name equivalent to -name=true -// rather than using the next command-line argument. -type Value interface { - String() string - Set(string) error -} - -// Getter is an interface that allows the contents of a Value to be retrieved. -// It wraps the Value interface, rather than being part of it, because it -// appeared after Go 1 and its compatibility rules. All Value types provided -// by this package satisfy the Getter interface. -type Getter interface { - Value - Get() interface{} -} - -// ErrorHandling defines how to handle flag parsing errors. -type ErrorHandling int - -// ErrorHandling strategies available when a flag parsing error occurs -const ( - ContinueOnError ErrorHandling = iota - ExitOnError - PanicOnError -) - -// A FlagSet represents a set of defined flags. The zero value of a FlagSet -// has no name and has ContinueOnError error handling. -type FlagSet struct { - // Usage is the function called when an error occurs while parsing flags. - // The field is a function (not a method) that may be changed to point to - // a custom error handler. - Usage func() - ShortUsage func() - - name string - parsed bool - actual map[string]*Flag - formal map[string]*Flag - args []string // arguments after flags - errorHandling ErrorHandling - output io.Writer // nil means stderr; use Out() accessor - nArgRequirements []nArgRequirement -} - -// A Flag represents the state of a flag. -type Flag struct { - Names []string // name as it appears on command line - Usage string // help message - Value Value // value as set - DefValue string // default value (as text); for usage message -} - -type flagSlice []string - -func (p flagSlice) Len() int { return len(p) } -func (p flagSlice) Less(i, j int) bool { - pi, pj := strings.TrimPrefix(p[i], "-"), strings.TrimPrefix(p[j], "-") - lpi, lpj := strings.ToLower(pi), strings.ToLower(pj) - if lpi != lpj { - return lpi < lpj - } - return pi < pj -} -func (p flagSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// sortFlags returns the flags as a slice in lexicographical sorted order. -func sortFlags(flags map[string]*Flag) []*Flag { - var list flagSlice - - // The sorted list is based on the first name, when flag map might use the other names. - nameMap := make(map[string]string) - - for n, f := range flags { - fName := strings.TrimPrefix(f.Names[0], "#") - nameMap[fName] = n - if len(f.Names) == 1 { - list = append(list, fName) - continue - } - - found := false - for _, name := range list { - if name == fName { - found = true - break - } - } - if !found { - list = append(list, fName) - } - } - sort.Sort(list) - result := make([]*Flag, len(list)) - for i, name := range list { - result[i] = flags[nameMap[name]] - } - return result -} - -// Name returns the name of the FlagSet. -func (fs *FlagSet) Name() string { - return fs.name -} - -// Out returns the destination for usage and error messages. -func (fs *FlagSet) Out() io.Writer { - if fs.output == nil { - return os.Stderr - } - return fs.output -} - -// SetOutput sets the destination for usage and error messages. -// If output is nil, os.Stderr is used. -func (fs *FlagSet) SetOutput(output io.Writer) { - fs.output = output -} - -// VisitAll visits the flags in lexicographical order, calling fn for each. 
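The Value interface above is the extension point the package docs mention for custom flag types. A hedged sketch of a comma-separated list flag; the listValue type is invented for illustration:

```go
package main

import (
	"fmt"
	"strings"

	flag "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag"
)

// listValue satisfies the Value interface (String/Set) by decomposing a
// comma-separated string into a slice on Set.
type listValue []string

func (l *listValue) String() string { return strings.Join(*l, ",") }

func (l *listValue) Set(s string) error {
	*l = strings.Split(s, ",")
	return nil
}

func main() {
	var hosts listValue
	flag.Var(&hosts, []string{"H", "-hosts"}, "comma-separated host list")
	flag.Parse()
	fmt.Println([]string(hosts))
}
```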
-// It visits all flags, even those not set. -func (fs *FlagSet) VisitAll(fn func(*Flag)) { - for _, flag := range sortFlags(fs.formal) { - fn(flag) - } -} - -// VisitAll visits the command-line flags in lexicographical order, calling -// fn for each. It visits all flags, even those not set. -func VisitAll(fn func(*Flag)) { - CommandLine.VisitAll(fn) -} - -// Visit visits the flags in lexicographical order, calling fn for each. -// It visits only those flags that have been set. -func (fs *FlagSet) Visit(fn func(*Flag)) { - for _, flag := range sortFlags(fs.actual) { - fn(flag) - } -} - -// Visit visits the command-line flags in lexicographical order, calling fn -// for each. It visits only those flags that have been set. -func Visit(fn func(*Flag)) { - CommandLine.Visit(fn) -} - -// Lookup returns the Flag structure of the named flag, returning nil if none exists. -func (fs *FlagSet) Lookup(name string) *Flag { - return fs.formal[name] -} - -// IsSet indicates whether the specified flag is set in the given FlagSet -func (fs *FlagSet) IsSet(name string) bool { - return fs.actual[name] != nil -} - -// Lookup returns the Flag structure of the named command-line flag, -// returning nil if none exists. -func Lookup(name string) *Flag { - return CommandLine.formal[name] -} - -// IsSet indicates whether the specified flag was specified at all on the cmd line. -func IsSet(name string) bool { - return CommandLine.IsSet(name) -} - -type nArgRequirementType int - -// Indicator used to pass to BadArgs function -const ( - Exact nArgRequirementType = iota - Max - Min -) - -type nArgRequirement struct { - Type nArgRequirementType - N int -} - -// Require adds a requirement about the number of arguments for the FlagSet. -// The first parameter can be Exact, Max, or Min to respectively specify the exact, -// the maximum, or the minimal number of arguments required. -// The actual check is done in FlagSet.CheckArgs(). -func (fs *FlagSet) Require(nArgRequirementType nArgRequirementType, nArg int) { - fs.nArgRequirements = append(fs.nArgRequirements, nArgRequirement{nArgRequirementType, nArg}) -} - -// CheckArgs uses the requirements set by FlagSet.Require() to validate -// the number of arguments. If the requirements are not met, -// an error message string is returned. -func (fs *FlagSet) CheckArgs() (message string) { - for _, req := range fs.nArgRequirements { - var arguments string - if req.N == 1 { - arguments = "1 argument" - } else { - arguments = fmt.Sprintf("%d arguments", req.N) - } - - str := func(kind string) string { - return fmt.Sprintf("%q requires %s%s", fs.name, kind, arguments) - } - - switch req.Type { - case Exact: - if fs.NArg() != req.N { - return str("") - } - case Max: - if fs.NArg() > req.N { - return str("a maximum of ") - } - case Min: - if fs.NArg() < req.N { - return str("a minimum of ") - } - } - } - return "" -} - -// Set sets the value of the named flag. -func (fs *FlagSet) Set(name, value string) error { - flag, ok := fs.formal[name] - if !ok { - return fmt.Errorf("no such flag -%v", name) - } - if err := flag.Value.Set(value); err != nil { - return err - } - if fs.actual == nil { - fs.actual = make(map[string]*Flag) - } - fs.actual[name] = flag - return nil -} - -// Set sets the value of the named command-line flag. -func Set(name, value string) error { - return CommandLine.Set(name, value) -} - -// PrintDefaults prints, to standard error unless configured -// otherwise, the default values of all defined flags in the set. 
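A short sketch of the Require/CheckArgs pair defined above (the set name and arguments are illustrative):

```go
package main

import (
	"fmt"

	flag "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag"
)

func main() {
	fs := flag.NewFlagSet("greet", flag.ContinueOnError)
	// Demand exactly one positional argument after flag parsing.
	fs.Require(flag.Exact, 1)

	if err := fs.Parse([]string{"hello", "extra"}); err != nil {
		return
	}
	// CheckArgs returns "" when the requirement is met, or a message.
	if msg := fs.CheckArgs(); msg != "" {
		fmt.Println(msg) // "greet" requires 1 argument
	}
}
```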
-func (fs *FlagSet) PrintDefaults() { - writer := tabwriter.NewWriter(fs.Out(), 20, 1, 3, ' ', 0) - home := homedir.Get() - - // Don't substitute when HOME is / - if runtime.GOOS != "windows" && home == "/" { - home = "" - } - - // Add a blank line between cmd description and list of options - if fs.FlagCount() > 0 { - fmt.Fprintln(writer, "") - } - - fs.VisitAll(func(flag *Flag) { - format := " -%s=%s" - names := []string{} - for _, name := range flag.Names { - if name[0] != '#' { - names = append(names, name) - } - } - if len(names) > 0 && len(flag.Usage) > 0 { - val := flag.DefValue - - if home != "" && strings.HasPrefix(val, home) { - val = homedir.GetShortcutString() + val[len(home):] - } - - fmt.Fprintf(writer, format, strings.Join(names, ", -"), val) - for i, line := range strings.Split(flag.Usage, "\n") { - if i != 0 { - line = " " + line - } - fmt.Fprintln(writer, "\t", line) - } - } - }) - writer.Flush() -} - -// PrintDefaults prints to standard error the default values of all defined command-line flags. -func PrintDefaults() { - CommandLine.PrintDefaults() -} - -// defaultUsage is the default function to print a usage message. -func defaultUsage(fs *FlagSet) { - if fs.name == "" { - fmt.Fprintf(fs.Out(), "Usage:\n") - } else { - fmt.Fprintf(fs.Out(), "Usage of %s:\n", fs.name) - } - fs.PrintDefaults() -} - -// NOTE: Usage is not just defaultUsage(CommandLine) -// because it serves (via godoc flag Usage) as the example -// for how to write your own usage function. - -// Usage prints to standard error a usage message documenting all defined command-line flags. -// The function is a variable that may be changed to point to a custom function. -var Usage = func() { - fmt.Fprintf(CommandLine.Out(), "Usage of %s:\n", os.Args[0]) - PrintDefaults() -} - -// ShortUsage prints to standard error a usage message documenting the standard command layout. -// The function is a variable that may be changed to point to a custom function. -var ShortUsage = func() { - fmt.Fprintf(CommandLine.output, "Usage of %s:\n", os.Args[0]) -} - -// FlagCount returns the number of flags that have been defined. -func (fs *FlagSet) FlagCount() int { return len(sortFlags(fs.formal)) } - -// FlagCountUndeprecated returns the number of undeprecated flags that have been defined. -func (fs *FlagSet) FlagCountUndeprecated() int { - count := 0 - for _, flag := range sortFlags(fs.formal) { - for _, name := range flag.Names { - if name[0] != '#' { - count++ - break - } - } - } - return count -} - -// NFlag returns the number of flags that have been set. -func (fs *FlagSet) NFlag() int { return len(fs.actual) } - -// NFlag returns the number of command-line flags that have been set. -func NFlag() int { return len(CommandLine.actual) } - -// Arg returns the i'th argument. Arg(0) is the first remaining argument -// after flags have been processed. -func (fs *FlagSet) Arg(i int) string { - if i < 0 || i >= len(fs.args) { - return "" - } - return fs.args[i] -} - -// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument -// after flags have been processed. -func Arg(i int) string { - return CommandLine.Arg(i) -} - -// NArg is the number of arguments remaining after flags have been processed. -func (fs *FlagSet) NArg() int { return len(fs.args) } - -// NArg is the number of arguments remaining after flags have been processed. -func NArg() int { return len(CommandLine.args) } - -// Args returns the non-flag arguments.
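As PrintDefaults above shows, any name starting with '#' is filtered out of the usage listing, so a flag whose every name is '#'-prefixed still parses but never appears. A hypothetical sketch:

```go
package main

import (
	"os"

	flag "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag"
)

func main() {
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	fs.String([]string{"-new"}, "", "the supported flag")

	// Every name carries '#', so PrintDefaults hides this flag from the
	// usage listing even though -old still parses.
	fs.String([]string{"#old", "#-old"}, "", "the legacy flag")

	fs.SetOutput(os.Stdout)
	fs.PrintDefaults() // lists only --new
}
```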
-func (fs *FlagSet) Args() []string { return fs.args } - -// Args returns the non-flag command-line arguments. -func Args() []string { return CommandLine.args } - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func (fs *FlagSet) BoolVar(p *bool, names []string, value bool, usage string) { - fs.Var(newBoolValue(value, p), names, usage) -} - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func BoolVar(p *bool, names []string, value bool, usage string) { - CommandLine.Var(newBoolValue(value, p), names, usage) -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func (fs *FlagSet) Bool(names []string, value bool, usage string) *bool { - p := new(bool) - fs.BoolVar(p, names, value, usage) - return p -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func Bool(names []string, value bool, usage string) *bool { - return CommandLine.Bool(names, value, usage) -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func (fs *FlagSet) IntVar(p *int, names []string, value int, usage string) { - fs.Var(newIntValue(value, p), names, usage) -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func IntVar(p *int, names []string, value int, usage string) { - CommandLine.Var(newIntValue(value, p), names, usage) -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func (fs *FlagSet) Int(names []string, value int, usage string) *int { - p := new(int) - fs.IntVar(p, names, value, usage) - return p -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func Int(names []string, value int, usage string) *int { - return CommandLine.Int(names, value, usage) -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func (fs *FlagSet) Int64Var(p *int64, names []string, value int64, usage string) { - fs.Var(newInt64Value(value, p), names, usage) -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func Int64Var(p *int64, names []string, value int64, usage string) { - CommandLine.Var(newInt64Value(value, p), names, usage) -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. 
-func (fs *FlagSet) Int64(names []string, value int64, usage string) *int64 { - p := new(int64) - fs.Int64Var(p, names, value, usage) - return p -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func Int64(names []string, value int64, usage string) *int64 { - return CommandLine.Int64(names, value, usage) -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (fs *FlagSet) UintVar(p *uint, names []string, value uint, usage string) { - fs.Var(newUintValue(value, p), names, usage) -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func UintVar(p *uint, names []string, value uint, usage string) { - CommandLine.Var(newUintValue(value, p), names, usage) -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func (fs *FlagSet) Uint(names []string, value uint, usage string) *uint { - p := new(uint) - fs.UintVar(p, names, value, usage) - return p -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint(names []string, value uint, usage string) *uint { - return CommandLine.Uint(names, value, usage) -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func (fs *FlagSet) Uint64Var(p *uint64, names []string, value uint64, usage string) { - fs.Var(newUint64Value(value, p), names, usage) -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func Uint64Var(p *uint64, names []string, value uint64, usage string) { - CommandLine.Var(newUint64Value(value, p), names, usage) -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func (fs *FlagSet) Uint64(names []string, value uint64, usage string) *uint64 { - p := new(uint64) - fs.Uint64Var(p, names, value, usage) - return p -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func Uint64(names []string, value uint64, usage string) *uint64 { - return CommandLine.Uint64(names, value, usage) -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. -func (fs *FlagSet) StringVar(p *string, names []string, value string, usage string) { - fs.Var(newStringValue(value, p), names, usage) -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. 
-func StringVar(p *string, names []string, value string, usage string) { - CommandLine.Var(newStringValue(value, p), names, usage) -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func (fs *FlagSet) String(names []string, value string, usage string) *string { - p := new(string) - fs.StringVar(p, names, value, usage) - return p -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func String(names []string, value string, usage string) *string { - return CommandLine.String(names, value, usage) -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. -func (fs *FlagSet) Float64Var(p *float64, names []string, value float64, usage string) { - fs.Var(newFloat64Value(value, p), names, usage) -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. -func Float64Var(p *float64, names []string, value float64, usage string) { - CommandLine.Var(newFloat64Value(value, p), names, usage) -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func (fs *FlagSet) Float64(names []string, value float64, usage string) *float64 { - p := new(float64) - fs.Float64Var(p, names, value, usage) - return p -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func Float64(names []string, value float64, usage string) *float64 { - return CommandLine.Float64(names, value, usage) -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func (fs *FlagSet) DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { - fs.Var(newDurationValue(value, p), names, usage) -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { - CommandLine.Var(newDurationValue(value, p), names, usage) -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func (fs *FlagSet) Duration(names []string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - fs.DurationVar(p, names, value, usage) - return p -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func Duration(names []string, value time.Duration, usage string) *time.Duration { - return CommandLine.Duration(names, value, usage) -} - -// Var defines a flag with the specified name and usage string. 
The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func (fs *FlagSet) Var(value Value, names []string, usage string) { - // Remember the default value as a string; it won't change. - flag := &Flag{names, usage, value, value.String()} - for _, name := range names { - name = strings.TrimPrefix(name, "#") - _, alreadythere := fs.formal[name] - if alreadythere { - var msg string - if fs.name == "" { - msg = fmt.Sprintf("flag redefined: %s", name) - } else { - msg = fmt.Sprintf("%s flag redefined: %s", fs.name, name) - } - fmt.Fprintln(fs.Out(), msg) - panic(msg) // Happens only if flags are declared with identical names - } - if fs.formal == nil { - fs.formal = make(map[string]*Flag) - } - fs.formal[name] = flag - } -} - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func Var(value Value, names []string, usage string) { - CommandLine.Var(value, names, usage) -} - -// failf prints to standard error a formatted error and usage message and -// returns the error. -func (fs *FlagSet) failf(format string, a ...interface{}) error { - err := fmt.Errorf(format, a...) - fmt.Fprintln(fs.Out(), err) - if os.Args[0] == fs.name { - fmt.Fprintf(fs.Out(), "See '%s --help'.\n", os.Args[0]) - } else { - fmt.Fprintf(fs.Out(), "See '%s %s --help'.\n", os.Args[0], fs.name) - } - return err -} - -// usage calls the Usage method for the flag set, or the usage function if -// the flag set is CommandLine. -func (fs *FlagSet) usage() { - if fs == CommandLine { - Usage() - } else if fs.Usage == nil { - defaultUsage(fs) - } else { - fs.Usage() - } -} - -func trimQuotes(str string) string { - if len(str) == 0 { - return str - } - type quote struct { - start, end byte - } - - // All valid quote types. - quotes := []quote{ - // Double quotes - { - start: '"', - end: '"', - }, - - // Single quotes - { - start: '\'', - end: '\'', - }, - } - - for _, quote := range quotes { - // Only strip if outermost match. - if str[0] == quote.start && str[len(str)-1] == quote.end { - str = str[1 : len(str)-1] - break - } - } - - return str -} - -// parseOne parses one flag. It reports whether a flag was seen. -func (fs *FlagSet) parseOne() (bool, string, error) { - if len(fs.args) == 0 { - return false, "", nil - } - s := fs.args[0] - if len(s) == 0 || s[0] != '-' || len(s) == 1 { - return false, "", nil - } - if s[1] == '-' && len(s) == 2 { // "--" terminates the flags - fs.args = fs.args[1:] - return false, "", nil - } - name := s[1:] - if len(name) == 0 || name[0] == '=' { - return false, "", fs.failf("bad flag syntax: %s", s) - } - - // it's a flag. does it have an argument? 
- fs.args = fs.args[1:] - hasValue := false - value := "" - if i := strings.Index(name, "="); i != -1 { - value = trimQuotes(name[i+1:]) - hasValue = true - name = name[:i] - } - - m := fs.formal - flag, alreadythere := m[name] // BUG - if !alreadythere { - if name == "-help" || name == "help" || name == "h" { // special case for nice help message. - fs.usage() - return false, "", ErrHelp - } - if len(name) > 0 && name[0] == '-' { - return false, "", fs.failf("flag provided but not defined: -%s", name) - } - return false, name, ErrRetry - } - if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg - if hasValue { - if err := fv.Set(value); err != nil { - return false, "", fs.failf("invalid boolean value %q for -%s: %v", value, name, err) - } - } else { - fv.Set("true") - } - } else { - // It must have a value, which might be the next argument. - if !hasValue && len(fs.args) > 0 { - // value is the next arg - hasValue = true - value, fs.args = fs.args[0], fs.args[1:] - } - if !hasValue { - return false, "", fs.failf("flag needs an argument: -%s", name) - } - if err := flag.Value.Set(value); err != nil { - return false, "", fs.failf("invalid value %q for flag -%s: %v", value, name, err) - } - } - if fs.actual == nil { - fs.actual = make(map[string]*Flag) - } - fs.actual[name] = flag - for i, n := range flag.Names { - if n == fmt.Sprintf("#%s", name) { - replacement := "" - for j := i; j < len(flag.Names); j++ { - if flag.Names[j][0] != '#' { - replacement = flag.Names[j] - break - } - } - if replacement != "" { - fmt.Fprintf(fs.Out(), "Warning: '-%s' is deprecated, it will be replaced by '-%s' soon. See usage.\n", name, replacement) - } else { - fmt.Fprintf(fs.Out(), "Warning: '-%s' is deprecated, it will be removed soon. See usage.\n", name) - } - } - } - return true, "", nil -} - -// Parse parses flag definitions from the argument list, which should not -// include the command name. Must be called after all flags in the FlagSet -// are defined and before flags are accessed by the program. -// The return value will be ErrHelp if -help was set but not defined. -func (fs *FlagSet) Parse(arguments []string) error { - fs.parsed = true - fs.args = arguments - for { - seen, name, err := fs.parseOne() - if seen { - continue - } - if err == nil { - break - } - if err == ErrRetry { - if len(name) > 1 { - err = nil - for _, letter := range strings.Split(name, "") { - fs.args = append([]string{"-" + letter}, fs.args...) - seen2, _, err2 := fs.parseOne() - if seen2 { - continue - } - if err2 != nil { - err = fs.failf("flag provided but not defined: -%s", name) - break - } - } - if err == nil { - continue - } - } else { - err = fs.failf("flag provided but not defined: -%s", name) - } - } - switch fs.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - os.Exit(2) - case PanicOnError: - panic(err) - } - } - return nil -} - -// ParseFlags is a utility function that adds a help flag if withHelp is true, -// calls fs.Parse(args) and prints a relevant error message if there is an -// incorrect number of arguments. It returns an error only if error handling is -// set to ContinueOnError and parsing fails. If error handling is set to -// ExitOnError, it's safe to ignore the return value.
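The ErrRetry path in Parse above is what makes grouped one-letter flags work: an unknown multi-letter token is split and re-queued letter by letter. A minimal sketch:

```go
package main

import (
	"fmt"

	flag "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag"
)

func main() {
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	v := fs.Bool([]string{"v", "-verbose"}, false, "verbose")
	s := fs.Bool([]string{"s", "-slow"}, false, "slow")

	// "-vs" is not a defined flag name, so parseOne returns ErrRetry and
	// Parse re-queues the token as "-v" followed by "-s".
	if err := fs.Parse([]string{"-vs"}); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(*v, *s) // true true
}
```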
-func (fs *FlagSet) ParseFlags(args []string, withHelp bool) error { - var help *bool - if withHelp { - help = fs.Bool([]string{"#help", "-help"}, false, "Print usage") - } - if err := fs.Parse(args); err != nil { - return err - } - if help != nil && *help { - fs.SetOutput(os.Stdout) - fs.Usage() - os.Exit(0) - } - if str := fs.CheckArgs(); str != "" { - fs.SetOutput(os.Stderr) - fs.ReportError(str, withHelp) - fs.ShortUsage() - os.Exit(1) - } - return nil -} - -// ReportError is a utility method that prints a user-friendly message -// containing the error that occurred during parsing and a suggestion to get help -func (fs *FlagSet) ReportError(str string, withHelp bool) { - if withHelp { - if os.Args[0] == fs.Name() { - str += ".\nSee '" + os.Args[0] + " --help'" - } else { - str += ".\nSee '" + os.Args[0] + " " + fs.Name() + " --help'" - } - } - fmt.Fprintf(fs.Out(), "docker: %s.\n", str) -} - -// Parsed reports whether fs.Parse has been called. -func (fs *FlagSet) Parsed() bool { - return fs.parsed -} - -// Parse parses the command-line flags from os.Args[1:]. Must be called -// after all flags are defined and before flags are accessed by the program. -func Parse() { - // Ignore errors; CommandLine is set for ExitOnError. - CommandLine.Parse(os.Args[1:]) -} - -// Parsed returns true if the command-line flags have been parsed. -func Parsed() bool { - return CommandLine.Parsed() -} - -// CommandLine is the default set of command-line flags, parsed from os.Args. -// The top-level functions such as BoolVar, Arg, and so on are wrappers for the -// methods of CommandLine. -var CommandLine = NewFlagSet(os.Args[0], ExitOnError) - -// NewFlagSet returns a new, empty flag set with the specified name and -// error handling property. -func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { - f := &FlagSet{ - name: name, - errorHandling: errorHandling, - } - return f -} - -// Init sets the name and error handling property for a flag set. -// By default, the zero FlagSet uses an empty name and the -// ContinueOnError error handling policy. -func (fs *FlagSet) Init(name string, errorHandling ErrorHandling) { - fs.name = name - fs.errorHandling = errorHandling -} - -type mergeVal struct { - Value - key string - fset *FlagSet -} - -func (v mergeVal) Set(s string) error { - return v.fset.Set(v.key, s) -} - -func (v mergeVal) IsBoolFlag() bool { - if b, ok := v.Value.(boolFlag); ok { - return b.IsBoolFlag() - } - return false -} - -// Merge is a helper function that merges n FlagSets into a single dest FlagSet. -// In case of a name collision between the flagsets it will apply -// the destination FlagSet's errorHandling behaviour. -func Merge(dest *FlagSet, flagsets ...*FlagSet) error { - for _, fset := range flagsets { - for k, f := range fset.formal { - if _, ok := dest.formal[k]; ok { - var err error - if fset.name == "" { - err = fmt.Errorf("flag redefined: %s", k) - } else { - err = fmt.Errorf("%s flag redefined: %s", fset.name, k) - } - fmt.Fprintln(fset.Out(), err.Error()) - // Happens only if flags are declared with identical names - switch dest.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - os.Exit(2) - case PanicOnError: - panic(err) - } - } - newF := *f - newF.Value = mergeVal{f.Value, k, fset} - dest.formal[k] = &newF - } - } - return nil -} - -// IsEmpty reports if the FlagSet is actually empty.
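A hedged sketch of Merge: flags copied into dest keep writing through to their originating FlagSet via the mergeVal wrapper (the set and flag names are illustrative):

```go
package main

import (
	"fmt"

	flag "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag"
)

func main() {
	common := flag.NewFlagSet("common", flag.ContinueOnError)
	debug := common.Bool([]string{"-debug"}, false, "enable debugging")

	cmd := flag.NewFlagSet("cmd", flag.ContinueOnError)
	name := cmd.String([]string{"-name"}, "", "object name")

	// Copy common's flags into cmd; values set through cmd are routed
	// back to common via the mergeVal wrapper.
	if err := flag.Merge(cmd, common); err != nil {
		fmt.Println(err)
		return
	}
	if err := cmd.Parse([]string{"--debug", "--name", "x"}); err != nil {
		return
	}
	fmt.Println(*debug, *name) // true x
}
```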
-func (fs *FlagSet) IsEmpty() bool { - return len(fs.actual) == 0 -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers.go deleted file mode 100644 index a604a9e12..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers.go +++ /dev/null @@ -1,259 +0,0 @@ -// Package parsers provides helper functions to parse and validate different types -// of strings, such as hosts, unix addresses, tcp addresses, filters, and kernel -// operating system versions. -package parsers - -import ( - "fmt" - "net" - "net/url" - "path" - "runtime" - "strconv" - "strings" -) - -// ParseDockerDaemonHost parses the specified address and returns an address that will be used as the host. -// Depending on the address specified, it will use the defaultTCPAddr or defaultUnixAddr. -// defaultUnixAddr must be an absolute file path (no `unix://` prefix) -// defaultTCPAddr must be the full `tcp://host:port` form -func ParseDockerDaemonHost(defaultTCPAddr, defaultTLSHost, defaultUnixAddr, defaultAddr, addr string) (string, error) { - addr = strings.TrimSpace(addr) - if addr == "" { - if defaultAddr == defaultTLSHost { - return defaultTLSHost, nil - } - if runtime.GOOS != "windows" { - return fmt.Sprintf("unix://%s", defaultUnixAddr), nil - } - return defaultTCPAddr, nil - } - addrParts := strings.Split(addr, "://") - if len(addrParts) == 1 { - addrParts = []string{"tcp", addrParts[0]} - } - - switch addrParts[0] { - case "tcp": - return ParseTCPAddr(addrParts[1], defaultTCPAddr) - case "unix": - return ParseUnixAddr(addrParts[1], defaultUnixAddr) - case "fd": - return addr, nil - default: - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } -} - -// ParseUnixAddr parses and validates that the specified address is a valid UNIX -// socket address. It returns a formatted UNIX socket address, either using the -// address parsed from addr, or the contents of defaultAddr if addr is a blank -// string. -func ParseUnixAddr(addr string, defaultAddr string) (string, error) { - addr = strings.TrimPrefix(addr, "unix://") - if strings.Contains(addr, "://") { - return "", fmt.Errorf("Invalid proto, expected unix: %s", addr) - } - if addr == "" { - addr = defaultAddr - } - return fmt.Sprintf("unix://%s", addr), nil -} - -// ParseTCPAddr parses and validates that the specified address is a valid TCP -// address. It returns a formatted TCP address, either using the address parsed -// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string.
-// tryAddr is expected to have already been Trim()'d -// defaultAddr must be in the full `tcp://host:port` form -func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { - if tryAddr == "" || tryAddr == "tcp://" { - return defaultAddr, nil - } - addr := strings.TrimPrefix(tryAddr, "tcp://") - if strings.Contains(addr, "://") || addr == "" { - return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) - } - - u, err := url.Parse("tcp://" + addr) - if err != nil { - return "", err - } - - host, port, err := net.SplitHostPort(u.Host) - if err != nil { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") - defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) - if err != nil { - return "", err - } - - if host == "" { - host = defaultHost - } - if port == "" { - port = defaultPort - } - p, err := strconv.Atoi(port) - if err != nil && p == 0 { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - if net.ParseIP(host).To4() == nil && strings.Contains(host, ":") { - // This is either an ipv6 address - host = "[" + host + "]" - } - return fmt.Sprintf("tcp://%s:%d%s", host, p, u.Path), nil -} - -// ParseRepositoryTag gets a repos name and returns the right reposName + tag|digest -// The tag can be confusing because of a port in a repository name. -// Ex: localhost.localdomain:5000/samalba/hipache:latest -// Digest ex: localhost:5000/foo/bar@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb -func ParseRepositoryTag(repos string) (string, string) { - n := strings.Index(repos, "@") - if n >= 0 { - parts := strings.Split(repos, "@") - return parts[0], parts[1] - } - n = strings.LastIndex(repos, ":") - if n < 0 { - return repos, "" - } - if tag := repos[n+1:]; !strings.Contains(tag, "/") { - return repos[:n], tag - } - return repos, "" -} - -// PartParser parses and validates the specified string (data) using the specified template -// e.g. ip:public:private -> 192.168.0.1:80:8000 -func PartParser(template, data string) (map[string]string, error) { - // ip:public:private - var ( - templateParts = strings.Split(template, ":") - parts = strings.Split(data, ":") - out = make(map[string]string, len(templateParts)) - ) - if len(parts) != len(templateParts) { - return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) - } - - for i, t := range templateParts { - value := "" - if len(parts) > i { - value = parts[i] - } - out[t] = value - } - return out, nil -} - -// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) -func ParseKeyValueOpt(opt string) (string, string, error) { - parts := strings.SplitN(opt, "=", 2) - if len(parts) != 2 { - return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) - } - return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil -} - -// ParsePortRange parses and validates the specified string as a port-range (8000-9000) -func ParsePortRange(ports string) (uint64, uint64, error) { - if ports == "" { - return 0, 0, fmt.Errorf("Empty string specified for ports.") - } - if !strings.Contains(ports, "-") { - start, err := strconv.ParseUint(ports, 10, 16) - end := start - return start, end, err - } - - parts := strings.Split(ports, "-") - start, err := strconv.ParseUint(parts[0], 10, 16) - if err != nil { - return 0, 0, err - } - end, err := strconv.ParseUint(parts[1], 10, 16) - if err != nil { - return 0, 0, err - } - if end < start { - return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) - } - return start, end, nil -} - -// ParseLink parses and validates the specified string as a link format (name:alias) -func ParseLink(val string) (string, string, error) { - if val == "" { - return "", "", fmt.Errorf("empty string specified for links") - } - arr := strings.Split(val, ":") - if len(arr) > 2 { - return "", "", fmt.Errorf("bad format for links: %s", val) - } - if len(arr) == 1 { - return val, val, nil - } - // This is kept because we can actually get an HostConfig with links - // from an already created container and the format is not `foo:bar` - // but `/foo:/c1/bar` - if strings.HasPrefix(arr[0], "/") { - _, alias := path.Split(arr[1]) - return arr[0][1:], alias, nil - } - return arr[0], arr[1], nil -} - -// ParseUintList parses and validates the specified string as the value -// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be -// one of the formats below. Note that duplicates are actually allowed in the -// input string. It returns a `map[int]bool` with available elements from `val` -// set to `true`. 
-// Supported formats: -// 7 -// 1-6 -// 0,3-4,7,8-10 -// 0-0,0,1-7 -// 03,1-3 <- this is gonna get parsed as [1,2,3] -// 3,2,1 -// 0-2,3,1 -func ParseUintList(val string) (map[int]bool, error) { - if val == "" { - return map[int]bool{}, nil - } - - availableInts := make(map[int]bool) - split := strings.Split(val, ",") - errInvalidFormat := fmt.Errorf("invalid format: %s", val) - - for _, r := range split { - if !strings.Contains(r, "-") { - v, err := strconv.Atoi(r) - if err != nil { - return nil, errInvalidFormat - } - availableInts[v] = true - } else { - split := strings.SplitN(r, "-", 2) - min, err := strconv.Atoi(split[0]) - if err != nil { - return nil, errInvalidFormat - } - max, err := strconv.Atoi(split[1]) - if err != nil { - return nil, errInvalidFormat - } - if max < min { - return nil, errInvalidFormat - } - for i := min; i <= max; i++ { - availableInts[i] = true - } - } - } - return availableInts, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go deleted file mode 100644 index 515fb4d05..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go +++ /dev/null @@ -1,119 +0,0 @@ -// Package pools provides a collection of pools which provide various -// data types with buffers. These can be used to lower the number of -// memory allocations and reuse buffers. -// -// New pools should be added to this package to allow them to be -// shared across packages. -// -// Utility functions which operate on pools should be added to this -// package to allow them to be reused. -package pools - -import ( - "bufio" - "io" - "sync" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils" -) - -var ( - // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. - BufioReader32KPool *BufioReaderPool - // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. - BufioWriter32KPool *BufioWriterPool -) - -const buffer32K = 32 * 1024 - -// BufioReaderPool is a bufio reader that uses sync.Pool. -type BufioReaderPool struct { - pool sync.Pool -} - -func init() { - BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) - BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) -} - -// newBufioReaderPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioReaderPoolWithSize(size int) *BufioReaderPool { - pool := sync.Pool{ - New: func() interface{} { return bufio.NewReaderSize(nil, size) }, - } - return &BufioReaderPool{pool: pool} -} - -// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. -func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { - buf := bufPool.pool.Get().(*bufio.Reader) - buf.Reset(r) - return buf -} - -// Put puts the bufio.Reader back into the pool. -func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. -func Copy(dst io.Writer, src io.Reader) (written int64, err error) { - buf := BufioReader32KPool.Get(src) - written, err = io.Copy(dst, buf) - BufioReader32KPool.Put(buf) - return -} - -// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back -// into the pool and closes the reader if it's an io.ReadCloser. 
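Editorial aside, not part of the diff: a minimal sketch of how the ParseUintList helper deleted above behaves on its documented formats. It assumes a checkout that still vendors the package at the parsers path being removed here, so the import path is only valid pre-removal.

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers"
)

func main() {
	// Duplicates and overlapping ranges collapse into the same map entries;
	// "03" parses as 3 via strconv.Atoi, so "03,1-3" yields {1, 2, 3}.
	for _, spec := range []string{"7", "1-6", "0,3-4,7,8-10", "03,1-3"} {
		set, err := parsers.ParseUintList(spec)
		if err != nil {
			fmt.Println(spec, "->", err)
			continue
		}
		fmt.Println(spec, "->", set)
	}
}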
-func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { - return ioutils.NewReadCloserWrapper(r, func() error { - if readCloser, ok := r.(io.ReadCloser); ok { - readCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} - -// BufioWriterPool is a bufio writer that uses sync.Pool. -type BufioWriterPool struct { - pool sync.Pool -} - -// newBufioWriterPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioWriterPoolWithSize(size int) *BufioWriterPool { - pool := sync.Pool{ - New: func() interface{} { return bufio.NewWriterSize(nil, size) }, - } - return &BufioWriterPool{pool: pool} -} - -// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. -func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { - buf := bufPool.pool.Get().(*bufio.Writer) - buf.Reset(w) - return buf -} - -// Put puts the bufio.Writer back into the pool. -func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back -// into the pool and closes the writer if it's an io.WriteCloser. -func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { - return ioutils.NewWriteCloserWrapper(w, func() error { - buf.Flush() - if writeCloser, ok := w.(io.WriteCloser); ok { - writeCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go deleted file mode 100644 index dd52b9082..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go +++ /dev/null @@ -1,11 +0,0 @@ -package promise - -// Go is a basic promise implementation: it wraps a function call in a goroutine, -// and returns a channel which will later return the function's return value. -func Go(f func() error) chan error { - ch := make(chan error, 1) - go func() { - ch <- f() - }() - return ch -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go deleted file mode 100644 index b2c60046a..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go +++ /dev/null @@ -1,175 +0,0 @@ -package stdcopy - -import ( - "encoding/binary" - "errors" - "io" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" -) - -const ( - stdWriterPrefixLen = 8 - stdWriterFdIndex = 0 - stdWriterSizeIndex = 4 - - startingBufLen = 32*1024 + stdWriterPrefixLen + 1 -) - -// StdType prefixes type and length to standard stream. -type StdType [stdWriterPrefixLen]byte - -var ( - // Stdin represents standard input stream type. - Stdin = StdType{0: 0} - // Stdout represents standard output stream type. - Stdout = StdType{0: 1} - // Stderr represents standard error stream type. - Stderr = StdType{0: 2} -) - -// StdWriter is a wrapper of io.Writer with extra customized info.
-type StdWriter struct { - io.Writer - prefix StdType - sizeBuf []byte -} - -func (w *StdWriter) Write(buf []byte) (n int, err error) { - var n1, n2 int - if w == nil || w.Writer == nil { - return 0, errors.New("Writer not instantiated") - } - binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf))) - n1, err = w.Writer.Write(w.prefix[:]) - if err != nil { - n = n1 - stdWriterPrefixLen - } else { - n2, err = w.Writer.Write(buf) - n = n1 + n2 - stdWriterPrefixLen - } - if n < 0 { - n = 0 - } - return -} - -// NewStdWriter instantiates a new Writer. -// Everything written to it will be encapsulated using a custom format, -// and written to the underlying `w` stream. -// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. -// `t` indicates the id of the stream to encapsulate. -// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. -func NewStdWriter(w io.Writer, t StdType) *StdWriter { - return &StdWriter{ - Writer: w, - prefix: t, - sizeBuf: make([]byte, 4), - } -} - -var errInvalidStdHeader = errors.New("Unrecognized input header") - -// StdCopy is a modified version of io.Copy. -// -// StdCopy will demultiplex `src`, assuming that it contains two streams, -// previously multiplexed together using a StdWriter instance. -// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. -// -// StdCopy will read until it hits EOF on `src`. It will then return a nil error. -// In other words: if `err` is non nil, it indicates a real underlying error. -// -// `written` will hold the total number of bytes written to `dstout` and `dsterr`. -func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { - var ( - buf = make([]byte, startingBufLen) - bufLen = len(buf) - nr, nw int - er, ew error - out io.Writer - frameSize int - ) - - for { - // Make sure we have at least a full header - for nr < stdWriterPrefixLen { - var nr2 int - nr2, er = src.Read(buf[nr:]) - nr += nr2 - if er == io.EOF { - if nr < stdWriterPrefixLen { - logrus.Debugf("Corrupted prefix: %v", buf[:nr]) - return written, nil - } - break - } - if er != nil { - logrus.Debugf("Error reading header: %s", er) - return 0, er - } - } - - // Check the first byte to know where to write - switch buf[stdWriterFdIndex] { - case 0: - fallthrough - case 1: - // Write on stdout - out = dstout - case 2: - // Write on stderr - out = dsterr - default: - logrus.Debugf("Error selecting output fd: (%d)", buf[stdWriterFdIndex]) - return 0, errInvalidStdHeader - } - - // Retrieve the size of the frame - frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4])) - logrus.Debugf("framesize: %d", frameSize) - - // Check if the buffer is big enough to read the frame. - // Extend it if necessary. - if frameSize+stdWriterPrefixLen > bufLen { - logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+stdWriterPrefixLen-bufLen+1, len(buf)) - buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...) 
- bufLen = len(buf) - } - - // While the amount of bytes read is less than the size of the frame + header, we keep reading - for nr < frameSize+stdWriterPrefixLen { - var nr2 int - nr2, er = src.Read(buf[nr:]) - nr += nr2 - if er == io.EOF { - if nr < frameSize+stdWriterPrefixLen { - logrus.Debugf("Corrupted frame: %v", buf[stdWriterPrefixLen:nr]) - return written, nil - } - break - } - if er != nil { - logrus.Debugf("Error reading frame: %s", er) - return 0, er - } - } - - // Write the retrieved frame (without header) - nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen]) - if ew != nil { - logrus.Debugf("Error writing frame: %s", ew) - return 0, ew - } - // If the frame has not been fully written: error - if nw != frameSize { - logrus.Debugf("Error Short Write: (%d on %d)", nw, frameSize) - return 0, io.ErrShortWrite - } - written += int64(nw) - - // Move the rest of the buffer to the beginning - copy(buf, buf[frameSize+stdWriterPrefixLen:]) - // Move the index - nr -= frameSize + stdWriterPrefixLen - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes.go deleted file mode 100644 index 31ed9ff10..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes.go +++ /dev/null @@ -1,31 +0,0 @@ -package system - -import ( - "os" - "time" -) - -// Chtimes changes the access time and modified time of a file at the given path -func Chtimes(name string, atime time.Time, mtime time.Time) error { - unixMinTime := time.Unix(0, 0) - // The max Unix time is 33 bits set - unixMaxTime := unixMinTime.Add((1<<33 - 1) * time.Second) - - // If the modified time is prior to the Unix Epoch, or after the - // end of Unix Time, os.Chtimes has undefined behavior - // default to Unix Epoch in this case, just in case - - if atime.Before(unixMinTime) || atime.After(unixMaxTime) { - atime = unixMinTime - } - - if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { - mtime = unixMinTime - } - - if err := os.Chtimes(name, atime, mtime); err != nil { - return err - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go deleted file mode 100644 index 288318985..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package system - -import ( - "errors" -) - -var ( - // ErrNotSupportedPlatform means the platform is not supported. - ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") -) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go deleted file mode 100644 index 04e2de787..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go +++ /dev/null @@ -1,83 +0,0 @@ -package system - -// This file implements syscalls for Win32 events which are not implemented -// in golang. 
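Editorial aside, not part of the diff: the stdcopy frame format deleted above (one stream-id byte, three zero bytes, a big-endian uint32 payload length, then the payload) can be exercised with a short mux/demux round trip. A minimal sketch, again assuming the vendored import path from a pre-removal checkout:

package main

import (
	"bytes"
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy"
)

func main() {
	// Multiplex two logical streams into one buffer; each Write emits an
	// 8-byte header followed by the payload.
	var muxed bytes.Buffer
	stdcopy.NewStdWriter(&muxed, stdcopy.Stdout).Write([]byte("to stdout\n"))
	stdcopy.NewStdWriter(&muxed, stdcopy.Stderr).Write([]byte("to stderr\n"))

	// Demultiplex the combined stream back into separate writers.
	var out, errBuf bytes.Buffer
	if _, err := stdcopy.StdCopy(&out, &errBuf, &muxed); err != nil {
		panic(err)
	}
	fmt.Print("stdout: ", out.String())
	fmt.Print("stderr: ", errBuf.String())
}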
- -import ( - "syscall" - "unsafe" -) - -var ( - procCreateEvent = modkernel32.NewProc("CreateEventW") - procOpenEvent = modkernel32.NewProc("OpenEventW") - procSetEvent = modkernel32.NewProc("SetEvent") - procResetEvent = modkernel32.NewProc("ResetEvent") - procPulseEvent = modkernel32.NewProc("PulseEvent") -) - -// CreateEvent implements win32 CreateEventW func in golang. It will create an event object. -func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { - namep, _ := syscall.UTF16PtrFromString(name) - var _p1 uint32 - if manualReset { - _p1 = 1 - } - var _p2 uint32 - if initialState { - _p2 = 1 - } - r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) - use(unsafe.Pointer(namep)) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = e1 - } - return -} - -// OpenEvent implements win32 OpenEventW func in golang. It opens an event object. -func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { - namep, _ := syscall.UTF16PtrFromString(name) - var _p1 uint32 - if inheritHandle { - _p1 = 1 - } - r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) - use(unsafe.Pointer(namep)) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = e1 - } - return -} - -// SetEvent implements win32 SetEvent func in golang. -func SetEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procSetEvent) -} - -// ResetEvent implements win32 ResetEvent func in golang. -func ResetEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procResetEvent) -} - -// PulseEvent implements win32 PulseEvent func in golang. -func PulseEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procPulseEvent) -} - -func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) { - r0, _, _ := proc.Call(uintptr(handle)) - if r0 != 0 { - err = syscall.Errno(r0) - } - return -} - -var temp unsafe.Pointer - -// use ensures a variable is kept alive without the GC freeing while still needed -func use(p unsafe.Pointer) { - temp = p -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go deleted file mode 100644 index c14feb849..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package system - -import ( - "os" - "path/filepath" -) - -// MkdirAll creates a directory named path along with any necessary parents, -// with permission specified by attribute perm for all dir created. -func MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. 
-func IsAbs(path string) bool { - return filepath.IsAbs(path) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go deleted file mode 100644 index 16823d551..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go +++ /dev/null @@ -1,82 +0,0 @@ -// +build windows - -package system - -import ( - "os" - "path/filepath" - "regexp" - "strings" - "syscall" -) - -// MkdirAll implementation that is volume path aware for Windows. -func MkdirAll(path string, perm os.FileMode) error { - if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { - return nil - } - - // The rest of this method is copied from os.MkdirAll and should be kept - // as-is to ensure compatibility. - - // Fast path: if we can tell whether path is a directory or file, stop with success or error. - dir, err := os.Stat(path) - if err == nil { - if dir.IsDir() { - return nil - } - return &os.PathError{ - Op: "mkdir", - Path: path, - Err: syscall.ENOTDIR, - } - } - - // Slow path: make sure parent exists and then call Mkdir for path. - i := len(path) - for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. - i-- - } - - j := i - for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. - j-- - } - - if j > 1 { - // Create parent - err = MkdirAll(path[0:j-1], perm) - if err != nil { - return err - } - } - - // Parent now exists; invoke Mkdir and use its result. - err = os.Mkdir(path, perm) - if err != nil { - // Handle arguments like "foo/." by - // double-checking that directory doesn't exist. - dir, err1 := os.Lstat(path) - if err1 == nil && dir.IsDir() { - return nil - } - return err - } - return nil -} - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, -// golang filepath.IsAbs does not consider a path \windows\system32 as absolute -// as it doesn't start with a drive-letter/colon combination. However, in -// docker we need to verify things such as WORKDIR /windows/system32 in -// a Dockerfile (which gets translated to \windows\system32 when being processed -// by the daemon. This SHOULD be treated as absolute from a docker processing -// perspective. -func IsAbs(path string) bool { - if !filepath.IsAbs(path) { - if !strings.HasPrefix(path, string(os.PathSeparator)) { - return false - } - } - return true -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go deleted file mode 100644 index bd23c4d50..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// Lstat takes a path to a file and returns -// a system.StatT type pertaining to that file. 
-// -// Throws an error if the file does not exist -func Lstat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Lstat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go deleted file mode 100644 index 49e87eb40..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build windows - -package system - -import ( - "os" -) - -// Lstat calls os.Lstat to get a fileinfo interface back. -// This is then copied into our own locally defined structure. -// Note the Linux version uses fromStatT to do the copy back, -// but that's not strictly necessary when already in an OS-specific module. -func Lstat(path string) (*StatT, error) { - fi, err := os.Lstat(path) - if err != nil { - return nil, err - } - - return &StatT{ - name: fi.Name(), - size: fi.Size(), - mode: fi.Mode(), - modTime: fi.ModTime(), - isDir: fi.IsDir()}, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go deleted file mode 100644 index 3b6e947e6..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go +++ /dev/null @@ -1,17 +0,0 @@ -package system - -// MemInfo contains memory statistics of the host system. -type MemInfo struct { - // Total usable RAM (i.e. physical RAM minus a few reserved bits and the - // kernel binary code). - MemTotal int64 - - // Amount of free memory. - MemFree int64 - - // Total amount of swap space available. - SwapTotal int64 - - // Amount of swap space that is currently unused. - SwapFree int64 -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go deleted file mode 100644 index 9d83304ff..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go +++ /dev/null @@ -1,66 +0,0 @@ -package system - -import ( - "bufio" - "io" - "os" - "strconv" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units" -) - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. -func ReadMemInfo() (*MemInfo, error) { - file, err := os.Open("/proc/meminfo") - if err != nil { - return nil, err - } - defer file.Close() - return parseMemInfo(file) -} - -// parseMemInfo parses the /proc/meminfo file into -// a MemInfo object given an io.Reader to the file. -// -// Throws an error if there are problems reading from the file -func parseMemInfo(reader io.Reader) (*MemInfo, error) { - meminfo := &MemInfo{} - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - // Expected format: ["MemTotal:", "1234", "kB"] - parts := strings.Fields(scanner.Text()) - - // Sanity checks: Skip malformed entries. - if len(parts) < 3 || parts[2] != "kB" { - continue - } - - // Convert to bytes.
- size, err := strconv.Atoi(parts[1]) - if err != nil { - continue - } - bytes := int64(size) * units.KiB - - switch parts[0] { - case "MemTotal:": - meminfo.MemTotal = bytes - case "MemFree:": - meminfo.MemFree = bytes - case "SwapTotal:": - meminfo.SwapTotal = bytes - case "SwapFree:": - meminfo.SwapFree = bytes - } - - } - - // Handle errors that may have occurred during the reading of the file. - if err := scanner.Err(); err != nil { - return nil, err - } - - return meminfo, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go deleted file mode 100644 index 82ddd30c1..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !linux,!windows - -package system - -// ReadMemInfo is not supported on platforms other than linux and windows. -func ReadMemInfo() (*MemInfo, error) { - return nil, ErrNotSupportedPlatform -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go deleted file mode 100644 index d46642598..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go +++ /dev/null @@ -1,44 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - - procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") -) - -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx -type memorystatusex struct { - dwLength uint32 - dwMemoryLoad uint32 - ullTotalPhys uint64 - ullAvailPhys uint64 - ullTotalPageFile uint64 - ullAvailPageFile uint64 - ullTotalVirtual uint64 - ullAvailVirtual uint64 - ullAvailExtendedVirtual uint64 -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. -func ReadMemInfo() (*MemInfo, error) { - msi := &memorystatusex{ - dwLength: 64, - } - r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) - if r1 == 0 { - return &MemInfo{}, nil - } - return &MemInfo{ - MemTotal: int64(msi.ullTotalPhys), - MemFree: int64(msi.ullAvailPhys), - SwapTotal: int64(msi.ullTotalPageFile), - SwapFree: int64(msi.ullAvailPageFile), - }, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go deleted file mode 100644 index 73958182b..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// Mknod creates a filesystem node (file, device special file or named pipe) named path -// with attributes specified by mode and dev. 
-func Mknod(path string, mode uint32, dev int) error { - return syscall.Mknod(path, mode, dev) -} - -// Mkdev is used to build the value of linux devices (in /dev/) which specifies major -// and minor number of the newly created device special file. -// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. -// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, -// then the top 12 bits of the minor. -func Mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go deleted file mode 100644 index 2e863c021..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build windows - -package system - -// Mknod is not implemented on Windows. -func Mknod(path string, mode uint32, dev int) error { - return ErrNotSupportedPlatform -} - -// Mkdev is not implemented on Windows. -func Mkdev(major int64, minor int64) uint32 { - panic("Mkdev not implemented on Windows.") -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go deleted file mode 100644 index 087034c5e..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go +++ /dev/null @@ -1,53 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// StatT type contains status of a file. It contains metadata -// like permission, owner, group, size, etc about a file. -type StatT struct { - mode uint32 - uid uint32 - gid uint32 - rdev uint64 - size int64 - mtim syscall.Timespec -} - -// Mode returns file's permission mode. -func (s StatT) Mode() uint32 { - return s.mode -} - -// UID returns file's user id of owner. -func (s StatT) UID() uint32 { - return s.uid -} - -// GID returns file's group id of owner. -func (s StatT) GID() uint32 { - return s.gid -} - -// Rdev returns file's device ID (if it's special file). -func (s StatT) Rdev() uint64 { - return s.rdev -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mtim returns file's last modification time. -func (s StatT) Mtim() syscall.Timespec { - return s.mtim -} - -// GetLastModification returns file's last modification time. 
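Editorial aside, not part of the diff: the bit layout that the Mkdev comment above describes is easier to see with a concrete device number. This standalone sketch reproduces the same expression; /dev/sda1 (major 8, minor 1) is used here only as a conventional Linux example.

package main

import "fmt"

// mkdev reproduces the deleted Mkdev helper: from low to high bits, the
// lower 8 bits of the minor, then 12 bits of the major, then the top
// 12 bits of the minor.
func mkdev(major, minor int64) uint32 {
	return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}

func main() {
	// /dev/sda1 is conventionally major 8, minor 1 on Linux.
	fmt.Printf("0x%x\n", mkdev(8, 1)) // prints 0x801
}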
-func (s StatT) GetLastModification() syscall.Timespec { - return s.Mtim() -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go deleted file mode 100644 index d0fb6f151..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go +++ /dev/null @@ -1,27 +0,0 @@ -package system - -import ( - "syscall" -) - -// fromStatT converts a syscall.Stat_t type to a system.StatT type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go deleted file mode 100644 index 8b1eded13..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go +++ /dev/null @@ -1,33 +0,0 @@ -package system - -import ( - "syscall" -) - -// fromStatT converts a syscall.Stat_t type to a system.StatT type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: s.Mode, - uid: s.Uid, - gid: s.Gid, - rdev: s.Rdev, - mtim: s.Mtim}, nil -} - -// FromStatT exists only on linux, and loads a system.StatT from a -// syscall.Stat_t. -func FromStatT(s *syscall.Stat_t) (*StatT, error) { - return fromStatT(s) -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file.
-// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go deleted file mode 100644 index 381ea8211..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !linux,!windows,!freebsd - -package system - -import ( - "syscall" -) - -// fromStatT creates a system.StatT type from a syscall.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go deleted file mode 100644 index 39490c625..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build windows - -package system - -import ( - "os" - "time" -) - -// StatT type contains status of a file. It contains metadata -// like name, permission, size, etc about a file. -type StatT struct { - name string - size int64 - mode os.FileMode - modTime time.Time - isDir bool -} - -// Name returns file's name. -func (s StatT) Name() string { - return s.name -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mode returns file's permission mode. -func (s StatT) Mode() os.FileMode { - return s.mode -} - -// ModTime returns file's last modification time. -func (s StatT) ModTime() time.Time { - return s.modTime -} - -// IsDir returns whether file is actually a directory. -func (s StatT) IsDir() bool { - return s.isDir -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_unix.go deleted file mode 100644 index 50054765a..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build linux freebsd - -package system - -import "syscall" - -// Unmount is a platform-specific helper function to call -// the unmount syscall. -func Unmount(dest string) { - syscall.Unmount(dest, 0) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_windows.go deleted file mode 100644 index 3a3a55b26..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package system - -// Unmount is a platform-specific helper function to call -// the unmount syscall. 
Not supported on Windows -func Unmount(dest string) { -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go deleted file mode 100644 index c670fcd75..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// Umask sets the current process's file mode creation mask to newmask -// and returns oldmask. -func Umask(newmask int) (oldmask int, err error) { - return syscall.Umask(newmask), nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go deleted file mode 100644 index 13f1de176..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build windows - -package system - -// Umask is not supported on the windows platform. -func Umask(newmask int) (oldmask int, err error) { - // should not be called on cli code path - return 0, ErrNotSupportedPlatform -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go deleted file mode 100644 index 0a1619754..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go +++ /dev/null @@ -1,8 +0,0 @@ -package system - -import "syscall" - -// LUtimesNano is not supported on the darwin platform. -func LUtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotSupportedPlatform -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go deleted file mode 100644 index e2eac3b55..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go +++ /dev/null @@ -1,22 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -// LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbolic link files because syscall.UtimesNano doesn't support a NOFOLLOW flag atm.
-func LUtimesNano(path string, ts []syscall.Timespec) error { - var _path *byte - _path, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - - if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { - return err - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go deleted file mode 100644 index 007bfa8c0..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go +++ /dev/null @@ -1,26 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -// LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbolic link files because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. -func LUtimesNano(path string, ts []syscall.Timespec) error { - // These are not currently available in syscall - atFdCwd := -100 - atSymLinkNoFollow := 0x100 - - var _path *byte - _path, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - - if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS { - return err - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go deleted file mode 100644 index 50c3a0436..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !linux,!freebsd,!darwin - -package system - -import "syscall" - -// LUtimesNano is not supported on platforms other than linux, freebsd and darwin. -func LUtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotSupportedPlatform -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go deleted file mode 100644 index d2e2c0579..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go +++ /dev/null @@ -1,63 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -// Lgetxattr retrieves the value of the extended attribute identified by attr -// and associated with the given path in the file system. -// It will return a nil slice and a nil error if the xattr is not set.
-func Lgetxattr(path string, attr string) ([]byte, error) { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return nil, err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return nil, err - } - - dest := make([]byte, 128) - destBytes := unsafe.Pointer(&dest[0]) - sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - if errno == syscall.ENODATA { - return nil, nil - } - if errno == syscall.ERANGE { - dest = make([]byte, sz) - destBytes := unsafe.Pointer(&dest[0]) - sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - } - if errno != 0 { - return nil, errno - } - - return dest[:sz], nil -} - -var _zero uintptr - -// Lsetxattr sets the value of the extended attribute identified by attr -// and associated with the given path in the file system. -func Lsetxattr(path string, attr string, data []byte, flags int) error { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return err - } - var dataBytes unsafe.Pointer - if len(data) > 0 { - dataBytes = unsafe.Pointer(&data[0]) - } else { - dataBytes = unsafe.Pointer(&_zero) - } - _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) - if errno != 0 { - return errno - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go deleted file mode 100644 index 0114f2227..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !linux - -package system - -// Lgetxattr is not supported on platforms other than linux. -func Lgetxattr(path string, attr string) ([]byte, error) { - return nil, ErrNotSupportedPlatform -} - -// Lsetxattr is not supported on platforms other than linux. -func Lsetxattr(path string, attr string, data []byte, flags int) error { - return ErrNotSupportedPlatform -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit.go deleted file mode 100644 index 8fb0d804d..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit.go +++ /dev/null @@ -1,111 +0,0 @@ -// Package ulimit provides structures and helper functions to parse and represent -// resource limits (Rlimit and Ulimit, its human-friendly version). -package ulimit - -import ( - "fmt" - "strconv" - "strings" -) - -// Ulimit is a human-friendly version of Rlimit. -type Ulimit struct { - Name string - Hard int64 - Soft int64 -} - -// Rlimit specifies the resource limits, such as max open files.
-type Rlimit struct { - Type int `json:"type,omitempty"` - Hard uint64 `json:"hard,omitempty"` - Soft uint64 `json:"soft,omitempty"` -} - -const ( - // magic numbers for making the syscall - // some of these are defined in the syscall package, but not all. - // Also since Windows client doesn't get access to the syscall package, need to - // define these here - rlimitAs = 9 - rlimitCore = 4 - rlimitCPU = 0 - rlimitData = 2 - rlimitFsize = 1 - rlimitLocks = 10 - rlimitMemlock = 8 - rlimitMsgqueue = 12 - rlimitNice = 13 - rlimitNofile = 7 - rlimitNproc = 6 - rlimitRss = 5 - rlimitRtprio = 14 - rlimitRttime = 15 - rlimitSigpending = 11 - rlimitStack = 3 -) - -var ulimitNameMapping = map[string]int{ - //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. - "core": rlimitCore, - "cpu": rlimitCPU, - "data": rlimitData, - "fsize": rlimitFsize, - "locks": rlimitLocks, - "memlock": rlimitMemlock, - "msgqueue": rlimitMsgqueue, - "nice": rlimitNice, - "nofile": rlimitNofile, - "nproc": rlimitNproc, - "rss": rlimitRss, - "rtprio": rlimitRtprio, - "rttime": rlimitRttime, - "sigpending": rlimitSigpending, - "stack": rlimitStack, -} - -// Parse parses and returns a Ulimit from the specified string. -func Parse(val string) (*Ulimit, error) { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid ulimit argument: %s", val) - } - - if _, exists := ulimitNameMapping[parts[0]]; !exists { - return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) - } - - limitVals := strings.SplitN(parts[1], ":", 2) - if len(limitVals) > 2 { - return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) - } - - soft, err := strconv.ParseInt(limitVals[0], 10, 64) - if err != nil { - return nil, err - } - - hard := soft // in case no hard was set - if len(limitVals) == 2 { - hard, err = strconv.ParseInt(limitVals[1], 10, 64) - } - if soft > hard { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, hard) - } - - return &Ulimit{Name: parts[0], Soft: soft, Hard: hard}, nil -} - -// GetRlimit returns the RLimit corresponding to Ulimit. -func (u *Ulimit) GetRlimit() (*Rlimit, error) { - t, exists := ulimitNameMapping[u.Name] - if !exists { - return nil, fmt.Errorf("invalid ulimit name %s", u.Name) - } - - return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil -} - -func (u *Ulimit) String() string { - return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration.go deleted file mode 100644 index c219a8a96..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration.go +++ /dev/null @@ -1,33 +0,0 @@ -// Package units provides helper function to parse and print size and time units -// in human-readable format. -package units - -import ( - "fmt" - "time" -) - -// HumanDuration returns a human-readable approximation of a duration -// (eg. "About a minute", "4 hours ago", etc.). 
-func HumanDuration(d time.Duration) string { - if seconds := int(d.Seconds()); seconds < 1 { - return "Less than a second" - } else if seconds < 60 { - return fmt.Sprintf("%d seconds", seconds) - } else if minutes := int(d.Minutes()); minutes == 1 { - return "About a minute" - } else if minutes < 60 { - return fmt.Sprintf("%d minutes", minutes) - } else if hours := int(d.Hours()); hours == 1 { - return "About an hour" - } else if hours < 48 { - return fmt.Sprintf("%d hours", hours) - } else if hours < 24*7*2 { - return fmt.Sprintf("%d days", hours/24) - } else if hours < 24*30*3 { - return fmt.Sprintf("%d weeks", hours/24/7) - } else if hours < 24*365*2 { - return fmt.Sprintf("%d months", hours/24/30) - } - return fmt.Sprintf("%d years", int(d.Hours())/24/365) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size.go deleted file mode 100644 index 2fde3b412..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size.go +++ /dev/null @@ -1,95 +0,0 @@ -package units - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -// See: http://en.wikipedia.org/wiki/Binary_prefix -const ( - // Decimal - - KB = 1000 - MB = 1000 * KB - GB = 1000 * MB - TB = 1000 * GB - PB = 1000 * TB - - // Binary - - KiB = 1024 - MiB = 1024 * KiB - GiB = 1024 * MiB - TiB = 1024 * GiB - PiB = 1024 * TiB -) - -type unitMap map[string]int64 - -var ( - decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} - binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} - sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`) -) - -var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} -var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} - -// CustomSize returns a human-readable approximation of a size -// using custom format. -func CustomSize(format string, size float64, base float64, _map []string) string { - i := 0 - for size >= base { - size = size / base - i++ - } - return fmt.Sprintf(format, size, _map[i]) -} - -// HumanSize returns a human-readable approximation of a size -// using SI standard (eg. "44kB", "17MB"). -func HumanSize(size float64) string { - return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs) -} - -// BytesSize returns a human-readable size in bytes, kibibytes, -// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). -func BytesSize(size float64) string { - return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs) -} - -// FromHumanSize returns an integer from a human-readable specification of a -// size using SI standard (eg. "44kB", "17MB"). -func FromHumanSize(size string) (int64, error) { - return parseSize(size, decimalMap) -} - -// RAMInBytes parses a human-readable string representing an amount of RAM -// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and -// returns the number of bytes, or -1 if the string is unparseable. -// Units are case-insensitive, and the 'b' suffix is optional. -func RAMInBytes(size string) (int64, error) { - return parseSize(size, binaryMap) -} - -// Parses the human-readable size string into the amount it represents. 
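Editorial aside, not part of the diff: the units helpers above split along the decimal/binary line, which a short round trip makes concrete. The import path is the vendored one being deleted, so this sketch only builds against a pre-removal checkout.

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units"
)

func main() {
	const n = 17179869184 // 16 * 1024^3 bytes

	fmt.Println(units.HumanSize(n)) // decimal (SI) units: "17.18 GB"
	fmt.Println(units.BytesSize(n)) // binary units: "16 GiB"

	// RAMInBytes parses with binary multipliers, so "16g" round-trips to n.
	v, err := units.RAMInBytes("16g")
	fmt.Println(v, err) // 17179869184 <nil>
}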
-func parseSize(sizeStr string, uMap unitMap) (int64, error) { - matches := sizeRegex.FindStringSubmatch(sizeStr) - if len(matches) != 3 { - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - - size, err := strconv.ParseInt(matches[1], 10, 0) - if err != nil { - return -1, err - } - - unitPrefix := strings.ToLower(matches[2]) - if mul, ok := uMap[unitPrefix]; ok { - size *= mul - } - - return size, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume/volume.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume/volume.go deleted file mode 100644 index d986207c8..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume/volume.go +++ /dev/null @@ -1,172 +0,0 @@ -package volume - -import ( - "os" - "runtime" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" - derr "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -// DefaultDriverName is the driver name used for the driver -// implemented in the local package. -const DefaultDriverName string = "local" - -// Driver is for creating and removing volumes. -type Driver interface { - // Name returns the name of the volume driver. - Name() string - // Create makes a new volume with the given name. - Create(name string, opts map[string]string) (Volume, error) - // Remove deletes the volume. - Remove(Volume) error -} - -// Volume is a place to store data. It is backed by a specific driver, and can be mounted. -type Volume interface { - // Name returns the name of the volume. - Name() string - // DriverName returns the name of the driver which owns this volume. - DriverName() string - // Path returns the absolute path to the volume. - Path() string - // Mount mounts the volume and returns the absolute path to - // where it can be consumed. - Mount() (string, error) - // Unmount unmounts the volume when it is no longer in use. - Unmount() error -} - -// MountPoint is the intersection point between a volume and a container. It -// specifies which volume is to be used and where inside a container it should -// be mounted. -type MountPoint struct { - Source string // Container host directory - Destination string // Inside the container - RW bool // True if writable - Name string // Name set by user - Driver string // Volume driver to use - Volume Volume `json:"-"` - - // Note Mode is not used on Windows - Mode string `json:"Relabel"` // Originally field was `Relabel` -} - -// Setup sets up a mount point by either mounting the volume if it is -// configured, or creating the source directory if supplied. -func (m *MountPoint) Setup() (string, error) { - if m.Volume != nil { - return m.Volume.Mount() - } - if len(m.Source) > 0 { - if _, err := os.Stat(m.Source); err != nil { - if !os.IsNotExist(err) { - return "", err - } - if runtime.GOOS != "windows" { // Windows does not have deprecation issues here - logrus.Warnf("Auto-creating non-existent volume host path %s, this is deprecated and will be removed soon", m.Source) - if err := system.MkdirAll(m.Source, 0755); err != nil { - return "", err - } - } - } - return m.Source, nil - } - return "", derr.ErrorCodeMountSetup -} - -// Path returns the path of a volume in a mount point.
-func (m *MountPoint) Path() string { - if m.Volume != nil { - return m.Volume.Path() - } - return m.Source -} - -// ValidMountMode will make sure the mount mode is valid. -// It returns whether it's a valid mount mode or not. -func ValidMountMode(mode string) bool { - return roModes[strings.ToLower(mode)] || rwModes[strings.ToLower(mode)] -} - -// ReadWrite tells you if a mode string is a valid read-write mode or not. -func ReadWrite(mode string) bool { - return rwModes[strings.ToLower(mode)] -} - -// ParseVolumesFrom ensures that the supplied volumes-from is valid. -func ParseVolumesFrom(spec string) (string, string, error) { - if len(spec) == 0 { - return "", "", derr.ErrorCodeVolumeFromBlank.WithArgs(spec) - } - - specParts := strings.SplitN(spec, ":", 2) - id := specParts[0] - mode := "rw" - - if len(specParts) == 2 { - mode = specParts[1] - if !ValidMountMode(mode) { - return "", "", derr.ErrorCodeVolumeInvalidMode.WithArgs(mode) - } - } - return id, mode, nil -} - -// SplitN splits raw into a maximum of n parts, separated by a separator colon. -// A separator colon is the last `:` character in the regex `[/:\\]?[a-zA-Z]:` (note `\\` is `\` escaped). -// This makes it possible to correctly split strings such as `C:\foo:D:\:rw`. -func SplitN(raw string, n int) []string { - var array []string - if len(raw) == 0 || raw[0] == ':' { - // invalid - return nil - } - // numberOfParts counts the number of parts separated by a separator colon - numberOfParts := 0 - // left represents the left-most cursor in raw, updated at every `:` character considered as a separator. - left := 0 - // right represents the right-most cursor in raw incremented with the loop. Note this - // starts at index 1 as index 0 is already handled above as a special case. - for right := 1; right < len(raw); right++ { - // stop parsing if the maximum number of parts has been reached - if n >= 0 && numberOfParts >= n { - break - } - if raw[right] != ':' { - continue - } - potentialDriveLetter := raw[right-1] - if (potentialDriveLetter >= 'A' && potentialDriveLetter <= 'Z') || (potentialDriveLetter >= 'a' && potentialDriveLetter <= 'z') { - if right > 1 { - beforePotentialDriveLetter := raw[right-2] - if beforePotentialDriveLetter != ':' && beforePotentialDriveLetter != '/' && beforePotentialDriveLetter != '\\' { - // e.g. `C:` is not preceded by any delimiter, therefore it was not a drive letter but a path ending with `C:`. - array = append(array, raw[left:right]) - left = right + 1 - numberOfParts++ - } - // else, `C:` is considered as a drive letter and not as a delimiter, so we continue parsing. - } - // if right == 1, then `C:` is the beginning of the raw string, therefore `:` is again not considered a delimiter and we continue parsing. - } else { - // if `:` is not preceded by a potential drive letter, then consider it as a delimiter. - array = append(array, raw[left:right]) - left = right + 1 - numberOfParts++ - } - } - // need to take care of the last part - if left < len(raw) { - if n >= 0 && numberOfParts >= n { - // if the maximum number of parts is reached, just append the rest to the last part - // left-1 is at the last `:` that needs to be included since it is not considered a separator.
- array[n-1] += raw[left-1:] - } else { - array = append(array, raw[left:]) - } - } - return array -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume/volume_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume/volume_unix.go deleted file mode 100644 index db0fea5b8..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume/volume_unix.go +++ /dev/null @@ -1,132 +0,0 @@ -// +build linux freebsd darwin - -package volume - -import ( - "fmt" - "path/filepath" - "strings" - - derr "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors" -) - -// read-write modes -var rwModes = map[string]bool{ - "rw": true, - "rw,Z": true, - "rw,z": true, - "z,rw": true, - "Z,rw": true, - "Z": true, - "z": true, -} - -// read-only modes -var roModes = map[string]bool{ - "ro": true, - "ro,Z": true, - "ro,z": true, - "z,ro": true, - "Z,ro": true, -} - -// BackwardsCompatible decides whether this mount point can be -// used in old versions of Docker or not. -// Only bind mounts and local volumes can be used in old versions of Docker. -func (m *MountPoint) BackwardsCompatible() bool { - return len(m.Source) > 0 || m.Driver == DefaultDriverName -} - -// HasResource checks whether the given absolute path for a container is in -// this mount point. If the relative path starts with `../` then the resource -// is outside of this mount point, but we can't simply check for this prefix -// because it misses `..` which is also outside of the mount, so check both. -func (m *MountPoint) HasResource(absolutePath string) bool { - relPath, err := filepath.Rel(m.Destination, absolutePath) - return err == nil && relPath != ".." && !strings.HasPrefix(relPath, fmt.Sprintf("..%c", filepath.Separator)) -} - -// ParseMountSpec validates the configuration of mount information is valid. -func ParseMountSpec(spec, volumeDriver string) (*MountPoint, error) { - spec = filepath.ToSlash(spec) - - mp := &MountPoint{ - RW: true, - } - if strings.Count(spec, ":") > 2 { - return nil, derr.ErrorCodeVolumeInvalid.WithArgs(spec) - } - - arr := strings.SplitN(spec, ":", 3) - if arr[0] == "" { - return nil, derr.ErrorCodeVolumeInvalid.WithArgs(spec) - } - - switch len(arr) { - case 1: - // Just a destination path in the container - mp.Destination = filepath.Clean(arr[0]) - case 2: - if isValid := ValidMountMode(arr[1]); isValid { - // Destination + Mode is not a valid volume - volumes - // cannot include a mode. 
eg /foo:rw - return nil, derr.ErrorCodeVolumeInvalid.WithArgs(spec) - } - // Host Source Path or Name + Destination - mp.Source = arr[0] - mp.Destination = arr[1] - case 3: - // HostSourcePath+DestinationPath+Mode - mp.Source = arr[0] - mp.Destination = arr[1] - mp.Mode = arr[2] // Mode field is used by SELinux to decide whether to apply label - if !ValidMountMode(mp.Mode) { - return nil, derr.ErrorCodeVolumeInvalidMode.WithArgs(mp.Mode) - } - mp.RW = ReadWrite(mp.Mode) - default: - return nil, derr.ErrorCodeVolumeInvalid.WithArgs(spec) - } - - //validate the volumes destination path - mp.Destination = filepath.Clean(mp.Destination) - if !filepath.IsAbs(mp.Destination) { - return nil, derr.ErrorCodeVolumeAbs.WithArgs(mp.Destination) - } - - // Destination cannot be "/" - if mp.Destination == "/" { - return nil, derr.ErrorCodeVolumeSlash.WithArgs(spec) - } - - name, source := ParseVolumeSource(mp.Source) - if len(source) == 0 { - mp.Source = "" // Clear it out as we previously assumed it was not a name - mp.Driver = volumeDriver - if len(mp.Driver) == 0 { - mp.Driver = DefaultDriverName - } - } else { - mp.Source = filepath.Clean(source) - } - - mp.Name = name - - return mp, nil -} - -// ParseVolumeSource parses the origin sources that's mounted into the container. -// It returns a name and a source. It looks to see if the spec passed in -// is an absolute file. If it is, it assumes the spec is a source. If not, -// it assumes the spec is a name. -func ParseVolumeSource(spec string) (string, string) { - if !filepath.IsAbs(spec) { - return spec, "" - } - return "", spec -} - -// IsVolumeNameValid checks a volume name in a platform specific manner. -func IsVolumeNameValid(name string) (bool, error) { - return true, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume/volume_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume/volume_windows.go deleted file mode 100644 index 3f0278015..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume/volume_windows.go +++ /dev/null @@ -1,181 +0,0 @@ -package volume - -import ( - "os" - "path/filepath" - "regexp" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" - derr "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors" -) - -// read-write modes -var rwModes = map[string]bool{ - "rw": true, -} - -// read-only modes -var roModes = map[string]bool{ - "ro": true, -} - -const ( - // Spec should be in the format [source:]destination[:mode] - // - // Examples: c:\foo bar:d:rw - // c:\foo:d:\bar - // myname:d: - // d:\ - // - // Explanation of this regex! Thanks @thaJeztah on IRC and gist for help. See - // https://gist.github.com/thaJeztah/6185659e4978789fb2b2. A good place to - // test is https://regex-golang.appspot.com/assets/html/index.html - // - // Useful link for referencing named capturing groups: - // http://stackoverflow.com/questions/20750843/using-named-matches-from-go-regex - // - // There are three match groups: source, destination and mode. 
- // - - - // RXHostDir is the first option of a source - RXHostDir = `[a-z]:\\(?:[^\\/:*?"<>|\r\n]+\\?)*` - // RXName is the second option of a source - RXName = `[^\\/:*?"<>|\r\n]+` - // RXReservedNames are reserved names not possible on Windows - RXReservedNames = `(con)|(prn)|(nul)|(aux)|(com[1-9])|(lpt[1-9])` - - // RXSource is the combined possibilities for a source - RXSource = `((?P<source>((` + RXHostDir + `)|(` + RXName + `))):)?` - - // Source. Can be either a host directory, a name, or omitted: - // HostDir: - // - Essentially using the folder solution from - // https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9781449327453/ch08s18.html - // but adding case insensitivity. - // - Must be an absolute path such as c:\path - // - Can include spaces such as `c:\program files` - // - And then followed by a colon which is not in the capture group - // - And can be optional - // Name: - // - Must not contain invalid NTFS filename characters (https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx) - // - And then followed by a colon which is not in the capture group - // - And can be optional - - // RXDestination is the regex expression for the mount destination - RXDestination = `(?P<destination>([a-z]):((?:\\[^\\/:*?"<>\r\n]+)*\\?))` - // Destination (aka container path): - // - Variation on hostdir but can be a drive followed by colon as well - // - If a path, must be absolute. Can include spaces - // - Drive cannot be c: (explicitly checked in code, not RegEx) - // - - // RXMode is the regex expression for the mode of the mount - RXMode = `(:(?P<mode>(?i)rw))?` - // Temporarily for TP4, disabling the use of ro as it's not supported yet - // in the platform. TODO Windows: `(:(?P<mode>(?i)ro|rw))?` - // mode (optional) - // - Hopefully self explanatory in comparison to above. - // - Colon is not in the capture group - // -) - -// ParseMountSpec validates the configuration of mount information is valid. -func ParseMountSpec(spec string, volumeDriver string) (*MountPoint, error) { - var specExp = regexp.MustCompile(`^` + RXSource + RXDestination + RXMode + `$`) - - // Ensure in platform semantics for matching. The CLI will send in Unix semantics. - match := specExp.FindStringSubmatch(filepath.FromSlash(strings.ToLower(spec))) - - // Must have something back - if len(match) == 0 { - return nil, derr.ErrorCodeVolumeInvalid.WithArgs(spec) - } - - // Pull out the sub expressions from the named capture groups - matchgroups := make(map[string]string) - for i, name := range specExp.SubexpNames() { - matchgroups[name] = strings.ToLower(match[i]) - } - - mp := &MountPoint{ - Source: matchgroups["source"], - Destination: matchgroups["destination"], - RW: true, - } - if strings.ToLower(matchgroups["mode"]) == "ro" { - mp.RW = false - } - - // Volumes cannot include an explicitly supplied mode eg c:\path:rw - if mp.Source == "" && mp.Destination != "" && matchgroups["mode"] != "" { - return nil, derr.ErrorCodeVolumeInvalid.WithArgs(spec) - } - - // Note: No need to check if destination is absolute as it must be by - // definition of matching the regex. - - if filepath.VolumeName(mp.Destination) == mp.Destination { - // Ensure the destination path, if a drive letter, is not the c drive - if strings.ToLower(mp.Destination) == "c:" { - return nil, derr.ErrorCodeVolumeDestIsC.WithArgs(spec) - } - } else { - // So we know the destination is a path, not drive letter. Clean it up.
- mp.Destination = filepath.Clean(mp.Destination) - // Ensure the destination path, if a path, is not the c root directory - if strings.ToLower(mp.Destination) == `c:\` { - return nil, derr.ErrorCodeVolumeDestIsCRoot.WithArgs(spec) - } - } - - // See if the source is a name instead of a host directory - if len(mp.Source) > 0 { - validName, err := IsVolumeNameValid(mp.Source) - if err != nil { - return nil, err - } - if validName { - // OK, so the source is a name. - mp.Name = mp.Source - mp.Source = "" - - // Set the driver accordingly - mp.Driver = volumeDriver - if len(mp.Driver) == 0 { - mp.Driver = DefaultDriverName - } - } else { - // OK, so the source must be a host directory. Make sure it's clean. - mp.Source = filepath.Clean(mp.Source) - } - } - - // Ensure the host path source, if supplied, exists and is a directory - if len(mp.Source) > 0 { - var fi os.FileInfo - var err error - if fi, err = os.Stat(mp.Source); err != nil { - return nil, derr.ErrorCodeVolumeSourceNotFound.WithArgs(mp.Source, err) - } - if !fi.IsDir() { - return nil, derr.ErrorCodeVolumeSourceNotDirectory.WithArgs(mp.Source) - } - } - - logrus.Debugf("MP: Source '%s', Dest '%s', RW %t, Name '%s', Driver '%s'", mp.Source, mp.Destination, mp.RW, mp.Name, mp.Driver) - return mp, nil -} - -// IsVolumeNameValid checks a volume name in a platform specific manner. -func IsVolumeNameValid(name string) (bool, error) { - nameExp := regexp.MustCompile(`^` + RXName + `$`) - if !nameExp.MatchString(name) { - return false, nil - } - nameExp = regexp.MustCompile(`^` + RXReservedNames + `$`) - if nameExp.MatchString(name) { - return false, derr.ErrorCodeVolumeNameReservedWord.WithArgs(name) - } - return true, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/MAINTAINERS b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/MAINTAINERS deleted file mode 100644 index edbe20066..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/MAINTAINERS +++ /dev/null @@ -1,2 +0,0 @@ -Tianon Gravi (@tianon) -Aleksa Sarai (@cyphar) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup.go deleted file mode 100644 index 6f8a982ff..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup.go +++ /dev/null @@ -1,108 +0,0 @@ -package user - -import ( - "errors" - "fmt" - "syscall" -) - -var ( - // The current operating system does not provide the required data for user lookups. - ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data") -) - -func lookupUser(filter func(u User) bool) (User, error) { - // Get operating system-specific passwd reader-closer. - passwd, err := GetPasswd() - if err != nil { - return User{}, err - } - defer passwd.Close() - - // Get the users. - users, err := ParsePasswdFilter(passwd, filter) - if err != nil { - return User{}, err - } - - // No user entries found. - if len(users) == 0 { - return User{}, fmt.Errorf("no matching entries in passwd file") - } - - // Assume the first entry is the "correct" one. - return users[0], nil -} - -// CurrentUser looks up the current user by their user id in /etc/passwd. 
If the -// user cannot be found (or there is no /etc/passwd file on the filesystem), -// then CurrentUser returns an error. -func CurrentUser() (User, error) { - return LookupUid(syscall.Getuid()) -} - -// LookupUser looks up a user by their username in /etc/passwd. If the user -// cannot be found (or there is no /etc/passwd file on the filesystem), then -// LookupUser returns an error. -func LookupUser(username string) (User, error) { - return lookupUser(func(u User) bool { - return u.Name == username - }) -} - -// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot -// be found (or there is no /etc/passwd file on the filesystem), then LookupUid -// returns an error. -func LookupUid(uid int) (User, error) { - return lookupUser(func(u User) bool { - return u.Uid == uid - }) -} - -func lookupGroup(filter func(g Group) bool) (Group, error) { - // Get operating system-specific group reader-closer. - group, err := GetGroup() - if err != nil { - return Group{}, err - } - defer group.Close() - - // Get the groups. - groups, err := ParseGroupFilter(group, filter) - if err != nil { - return Group{}, err - } - - // No group entries found. - if len(groups) == 0 { - return Group{}, fmt.Errorf("no matching entries in group file") - } - - // Assume the first entry is the "correct" one. - return groups[0], nil -} - -// CurrentGroup looks up the current user's group by their primary group id's -// entry in /etc/passwd. If the group cannot be found (or there is no -// /etc/group file on the filesystem), then CurrentGroup returns an error. -func CurrentGroup() (Group, error) { - return LookupGid(syscall.Getgid()) -} - -// LookupGroup looks up a group by its name in /etc/group. If the group cannot -// be found (or there is no /etc/group file on the filesystem), then LookupGroup -// returns an error. -func LookupGroup(groupname string) (Group, error) { - return lookupGroup(func(g Group) bool { - return g.Name == groupname - }) -} - -// LookupGid looks up a group by its group id in /etc/group. If the group cannot -// be found (or there is no /etc/group file on the filesystem), then LookupGid -// returns an error. -func LookupGid(gid int) (Group, error) { - return lookupGroup(func(g Group) bool { - return g.Gid == gid - }) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unix.go deleted file mode 100644 index 758b734c2..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unix.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package user - -import ( - "io" - "os" -) - -// Unix-specific path to the passwd and group formatted files.
-const ( - unixPasswdPath = "/etc/passwd" - unixGroupPath = "/etc/group" -) - -func GetPasswdPath() (string, error) { - return unixPasswdPath, nil -} - -func GetPasswd() (io.ReadCloser, error) { - return os.Open(unixPasswdPath) -} - -func GetGroupPath() (string, error) { - return unixGroupPath, nil -} - -func GetGroup() (io.ReadCloser, error) { - return os.Open(unixGroupPath) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unsupported.go deleted file mode 100644 index 721794887..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unsupported.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris - -package user - -import "io" - -func GetPasswdPath() (string, error) { - return "", ErrUnsupported -} - -func GetPasswd() (io.ReadCloser, error) { - return nil, ErrUnsupported -} - -func GetGroupPath() (string, error) { - return "", ErrUnsupported -} - -func GetGroup() (io.ReadCloser, error) { - return nil, ErrUnsupported -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/user.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/user.go deleted file mode 100644 index 13226dbfa..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/user.go +++ /dev/null @@ -1,407 +0,0 @@ -package user - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -const ( - minId = 0 - maxId = 1<<31 - 1 //for 32-bit systems compatibility -) - -var ( - ErrRange = fmt.Errorf("Uids and gids must be in range %d-%d", minId, maxId) -) - -type User struct { - Name string - Pass string - Uid int - Gid int - Gecos string - Home string - Shell string -} - -type Group struct { - Name string - Pass string - Gid int - List []string -} - -func parseLine(line string, v ...interface{}) { - if line == "" { - return - } - - parts := strings.Split(line, ":") - for i, p := range parts { - if len(v) <= i { - // if we have more "parts" than we have places to put them, bail for great "tolerance" of naughty configuration files - break - } - - switch e := v[i].(type) { - case *string: - // "root", "adm", "/bin/bash" - *e = p - case *int: - // "0", "4", "1000" - // ignore string to int conversion errors, for great "tolerance" of naughty configuration files - *e, _ = strconv.Atoi(p) - case *[]string: - // "", "root", "root,adm,daemon" - if p != "" { - *e = strings.Split(p, ",") - } else { - *e = []string{} - } - default: - // panic, because this is a programming/logic error, not a runtime one - panic("parseLine expects only pointers! 
argument " + strconv.Itoa(i) + " is not a pointer!") - } - } -} - -func ParsePasswdFile(path string) ([]User, error) { - passwd, err := os.Open(path) - if err != nil { - return nil, err - } - defer passwd.Close() - return ParsePasswd(passwd) -} - -func ParsePasswd(passwd io.Reader) ([]User, error) { - return ParsePasswdFilter(passwd, nil) -} - -func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { - passwd, err := os.Open(path) - if err != nil { - return nil, err - } - defer passwd.Close() - return ParsePasswdFilter(passwd, filter) -} - -func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { - if r == nil { - return nil, fmt.Errorf("nil source for passwd-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []User{} - ) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - text := strings.TrimSpace(s.Text()) - if text == "" { - continue - } - - // see: man 5 passwd - // name:password:UID:GID:GECOS:directory:shell - // Name:Pass:Uid:Gid:Gecos:Home:Shell - // root:x:0:0:root:/root:/bin/bash - // adm:x:3:4:adm:/var/adm:/bin/false - p := User{} - parseLine( - text, - &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell, - ) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - - return out, nil -} - -func ParseGroupFile(path string) ([]Group, error) { - group, err := os.Open(path) - if err != nil { - return nil, err - } - defer group.Close() - return ParseGroup(group) -} - -func ParseGroup(group io.Reader) ([]Group, error) { - return ParseGroupFilter(group, nil) -} - -func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { - group, err := os.Open(path) - if err != nil { - return nil, err - } - defer group.Close() - return ParseGroupFilter(group, filter) -} - -func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { - if r == nil { - return nil, fmt.Errorf("nil source for group-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []Group{} - ) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - text := s.Text() - if text == "" { - continue - } - - // see: man 5 group - // group_name:password:GID:user_list - // Name:Pass:Gid:List - // root:x:0:root - // adm:x:4:root,adm,daemon - p := Group{} - parseLine( - text, - &p.Name, &p.Pass, &p.Gid, &p.List, - ) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - - return out, nil -} - -type ExecUser struct { - Uid, Gid int - Sgids []int - Home string -} - -// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the -// given file paths and uses that data as the arguments to GetExecUser. If the -// files cannot be opened for any reason, the error is ignored and a nil -// io.Reader is passed instead. -func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { - passwd, err := os.Open(passwdPath) - if err != nil { - passwd = nil - } else { - defer passwd.Close() - } - - group, err := os.Open(groupPath) - if err != nil { - group = nil - } else { - defer group.Close() - } - - return GetExecUser(userSpec, defaults, passwd, group) -} - -// GetExecUser parses a user specification string (using the passwd and group -// readers as sources for /etc/passwd and /etc/group data, respectively). In -// the case of blank fields or missing data from the sources, the values in -// defaults is used. 
-// -// GetExecUser will return an error if a user or group literal could not be -// found in any entry in passwd and group respectively. -// -// Examples of valid user specifications are: -// * "" -// * "user" -// * "uid" -// * "user:group" -// * "uid:gid -// * "user:gid" -// * "uid:group" -func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) { - var ( - userArg, groupArg string - name string - ) - - if defaults == nil { - defaults = new(ExecUser) - } - - // Copy over defaults. - user := &ExecUser{ - Uid: defaults.Uid, - Gid: defaults.Gid, - Sgids: defaults.Sgids, - Home: defaults.Home, - } - - // Sgids slice *cannot* be nil. - if user.Sgids == nil { - user.Sgids = []int{} - } - - // allow for userArg to have either "user" syntax, or optionally "user:group" syntax - parseLine(userSpec, &userArg, &groupArg) - - users, err := ParsePasswdFilter(passwd, func(u User) bool { - if userArg == "" { - return u.Uid == user.Uid - } - return u.Name == userArg || strconv.Itoa(u.Uid) == userArg - }) - if err != nil && passwd != nil { - if userArg == "" { - userArg = strconv.Itoa(user.Uid) - } - return nil, fmt.Errorf("Unable to find user %v: %v", userArg, err) - } - - haveUser := users != nil && len(users) > 0 - if haveUser { - // if we found any user entries that matched our filter, let's take the first one as "correct" - name = users[0].Name - user.Uid = users[0].Uid - user.Gid = users[0].Gid - user.Home = users[0].Home - } else if userArg != "" { - // we asked for a user but didn't find them... let's check to see if we wanted a numeric user - user.Uid, err = strconv.Atoi(userArg) - if err != nil { - // not numeric - we have to bail - return nil, fmt.Errorf("Unable to find user %v", userArg) - } - - // Must be inside valid uid range. - if user.Uid < minId || user.Uid > maxId { - return nil, ErrRange - } - - // if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit - } - - if groupArg != "" || name != "" { - groups, err := ParseGroupFilter(group, func(g Group) bool { - // Explicit group format takes precedence. - if groupArg != "" { - return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg - } - - // Check if user is a member. - for _, u := range g.List { - if u == name { - return true - } - } - - return false - }) - if err != nil && group != nil { - return nil, fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err) - } - - haveGroup := groups != nil && len(groups) > 0 - if groupArg != "" { - if haveGroup { - // if we found any group entries that matched our filter, let's take the first one as "correct" - user.Gid = groups[0].Gid - } else { - // we asked for a group but didn't find id... let's check to see if we wanted a numeric group - user.Gid, err = strconv.Atoi(groupArg) - if err != nil { - // not numeric - we have to bail - return nil, fmt.Errorf("Unable to find group %v", groupArg) - } - - // Ensure gid is inside gid range. - if user.Gid < minId || user.Gid > maxId { - return nil, ErrRange - } - - // if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit - } - } else if haveGroup { - // If implicit group format, fill supplementary gids. - user.Sgids = make([]int, len(groups)) - for i, group := range groups { - user.Sgids[i] = group.Gid - } - } - } - - return user, nil -} - -// GetAdditionalGroupsPath looks up a list of groups by name or group id -// against the group file. If a group name cannot be found, an error will be -// returned. 
If a group id cannot be found, it will be returned as-is. -func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { - groupReader, err := os.Open(groupPath) - if err != nil { - return nil, fmt.Errorf("Failed to open group file: %v", err) - } - defer groupReader.Close() - - groups, err := ParseGroupFilter(groupReader, func(g Group) bool { - for _, ag := range additionalGroups { - if g.Name == ag || strconv.Itoa(g.Gid) == ag { - return true - } - } - return false - }) - if err != nil { - return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err) - } - - gidMap := make(map[int]struct{}) - for _, ag := range additionalGroups { - var found bool - for _, g := range groups { - // if we found a matched group either by name or gid, take the - // first matched as correct - if g.Name == ag || strconv.Itoa(g.Gid) == ag { - if _, ok := gidMap[g.Gid]; !ok { - gidMap[g.Gid] = struct{}{} - found = true - break - } - } - } - // we asked for a group but didn't find it. let's check to see - // if we wanted a numeric group - if !found { - gid, err := strconv.Atoi(ag) - if err != nil { - return nil, fmt.Errorf("Unable to find group %s", ag) - } - // Ensure gid is inside gid range. - if gid < minId || gid > maxId { - return nil, ErrRange - } - gidMap[gid] = struct{}{} - } - } - gids := []int{} - for gid := range gidMap { - gids = append(gids, gid) - } - return gids, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/LICENSE deleted file mode 100644 index 0e5fb8728..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
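Before moving on to the gorilla packages removed next, a brief illustration of the libcontainer user package deleted above: its entry point is GetExecUser, which resolves a `user:group` specification against passwd- and group-formatted readers, falling back to numeric ids when no entry matches. A minimal sketch of that flow; the sample passwd/group contents and the main wrapper are invented for illustration, and the import is the vendored path shown in the diff headers above:

```go
package main

import (
	"fmt"
	"strings"

	// Vendored path from the files deleted above.
	"github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user"
)

func main() {
	// Invented sample data in passwd(5)/group(5) format.
	passwd := strings.NewReader("root:x:0:0:root:/root:/bin/bash\nweb:x:33:33:web:/var/www:/bin/false\n")
	group := strings.NewReader("root:x:0:root\nwww-data:x:33:web\n")

	// Resolve a "user:group" spec; nil defaults means zero values are used
	// for any field the spec does not determine.
	execUser, err := user.GetExecUser("web:www-data", nil, passwd, group)
	if err != nil {
		panic(err)
	}
	fmt.Printf("uid=%d gid=%d home=%s\n", execUser.Uid, execUser.Gid, execUser.Home)
	// Expected output: uid=33 gid=33 home=/var/www
}
```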
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/README.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/README.md deleted file mode 100644 index c60a31b05..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/README.md +++ /dev/null @@ -1,7 +0,0 @@ -context -======= -[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) - -gorilla/context is a general purpose registry for global request variables. - -Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.go deleted file mode 100644 index 81cb128b1..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context - -import ( - "net/http" - "sync" - "time" -) - -var ( - mutex sync.RWMutex - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) -) - -// Set stores a value for a given key in a given request. -func Set(r *http.Request, key, val interface{}) { - mutex.Lock() - if data[r] == nil { - data[r] = make(map[interface{}]interface{}) - datat[r] = time.Now().Unix() - } - data[r][key] = val - mutex.Unlock() -} - -// Get returns a value stored for a given key in a given request. -func Get(r *http.Request, key interface{}) interface{} { - mutex.RLock() - if ctx := data[r]; ctx != nil { - value := ctx[key] - mutex.RUnlock() - return value - } - mutex.RUnlock() - return nil -} - -// GetOk returns stored value and presence state like multi-value return of map access. -func GetOk(r *http.Request, key interface{}) (interface{}, bool) { - mutex.RLock() - if _, ok := data[r]; ok { - value, ok := data[r][key] - mutex.RUnlock() - return value, ok - } - mutex.RUnlock() - return nil, false -} - -// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. -func GetAll(r *http.Request) map[interface{}]interface{} { - mutex.RLock() - if context, ok := data[r]; ok { - result := make(map[interface{}]interface{}, len(context)) - for k, v := range context { - result[k] = v - } - mutex.RUnlock() - return result - } - mutex.RUnlock() - return nil -} - -// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if -// the request was registered. -func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { - mutex.RLock() - context, ok := data[r] - result := make(map[interface{}]interface{}, len(context)) - for k, v := range context { - result[k] = v - } - mutex.RUnlock() - return result, ok -} - -// Delete removes a value stored for a given key in a given request. -func Delete(r *http.Request, key interface{}) { - mutex.Lock() - if data[r] != nil { - delete(data[r], key) - } - mutex.Unlock() -} - -// Clear removes all values stored for a given request. -// -// This is usually called by a handler wrapper to clean up request -// variables at the end of a request lifetime. See ClearHandler(). 
-func Clear(r *http.Request) { - mutex.Lock() - clear(r) - mutex.Unlock() -} - -// clear is Clear without the lock. -func clear(r *http.Request) { - delete(data, r) - delete(datat, r) -} - -// Purge removes request data stored for longer than maxAge, in seconds. -// It returns the amount of requests removed. -// -// If maxAge <= 0, all request data is removed. -// -// This is only used for sanity check: in case context cleaning was not -// properly set some request data can be kept forever, consuming an increasing -// amount of memory. In case this is detected, Purge() must be called -// periodically until the problem is fixed. -func Purge(maxAge int) int { - mutex.Lock() - count := 0 - if maxAge <= 0 { - count = len(data) - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) - } else { - min := time.Now().Unix() - int64(maxAge) - for r := range data { - if datat[r] < min { - clear(r) - count++ - } - } - } - mutex.Unlock() - return count -} - -// ClearHandler wraps an http.Handler and clears request values at the end -// of a request lifetime. -func ClearHandler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer Clear(r) - h.ServeHTTP(w, r) - }) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/doc.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/doc.go deleted file mode 100644 index 73c740031..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/doc.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package context stores values shared during a request lifetime. - -For example, a router can set variables extracted from the URL and later -application handlers can access those values, or it can be used to store -sessions values to be saved at the end of a request. There are several -others common uses. - -The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: - - http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 - -Here's the basic usage: first define the keys that you will need. The key -type is interface{} so a key can be of any type that supports equality. -Here we define a key using a custom int type to avoid name collisions: - - package foo - - import ( - "github.com/gorilla/context" - ) - - type key int - - const MyKey key = 0 - -Then set a variable. Variables are bound to an http.Request object, so you -need a request instance to set a value: - - context.Set(r, MyKey, "bar") - -The application can later access the variable using the same key you provided: - - func MyHandler(w http.ResponseWriter, r *http.Request) { - // val is "bar". - val := context.Get(r, foo.MyKey) - - // returns ("bar", true) - val, ok := context.GetOk(r, foo.MyKey) - // ... - } - -And that's all about the basic usage. We discuss some other ideas below. - -Any type can be stored in the context. To enforce a given type, make the key -private and wrap Get() and Set() to accept and return values of a specific -type: - - type key int - - const mykey key = 0 - - // GetMyKey returns a value for this package from the request values. 
- func GetMyKey(r *http.Request) SomeType { - if rv := context.Get(r, mykey); rv != nil { - return rv.(SomeType) - } - return nil - } - - // SetMyKey sets a value for this package in the request values. - func SetMyKey(r *http.Request, val SomeType) { - context.Set(r, mykey, val) - } - -Variables must be cleared at the end of a request, to remove all values -that were stored. This can be done in an http.Handler, after a request was -served. Just call Clear() passing the request: - - context.Clear(r) - -...or use ClearHandler(), which conveniently wraps an http.Handler to clear -variables at the end of a request lifetime. - -The Routers from the packages gorilla/mux and gorilla/pat call Clear() -so if you are using either of them you don't need to clear the context manually. -*/ -package context diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/LICENSE deleted file mode 100644 index 0e5fb8728..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/README.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/README.md deleted file mode 100644 index 9a046ff97..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/README.md +++ /dev/null @@ -1,235 +0,0 @@ -mux -=== -[![GoDoc](https://godoc.org/github.com/gorilla/securecookie?status.svg)](https://godoc.org/github.com/gorilla/securecookie) -[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) - -Package gorilla/mux implements a request router and dispatcher. - -The name mux stands for "HTTP request multiplexer". 
Like the standard -http.ServeMux, mux.Router matches incoming requests against a list of -registered routes and calls a handler for the route that matches the URL -or other conditions. The main features are: - - * Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. - * URL hosts and paths can have variables with an optional regular - expression. - * Registered URLs can be built, or "reversed", which helps maintaining - references to resources. - * Routes can be used as subrouters: nested routes are only tested if the - parent route matches. This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. - * It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. - -Let's start registering a couple of URL paths and handlers: - - func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) - } - -Here we register three routes mapping URL paths to handlers. This is -equivalent to how http.HandleFunc() works: if an incoming request URL matches -one of the paths, the corresponding handler is called passing -(http.ResponseWriter, *http.Request) as parameters. - -Paths can have variables. They are defined using the format {name} or -{name:pattern}. If a regular expression pattern is not defined, the matched -variable will be anything until the next slash. For example: - - r := mux.NewRouter() - r.HandleFunc("/products/{key}", ProductHandler) - r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) - -The names are used to create a map of route variables which can be retrieved -calling mux.Vars(): - - vars := mux.Vars(request) - category := vars["category"] - -And this is all you need to know about the basic usage. More advanced options -are explained below. - -Routes can also be restricted to a domain or subdomain. Just define a host -pattern to be matched. They can also have variables: - - r := mux.NewRouter() - // Only matches if domain is "www.example.com". - r.Host("www.example.com") - // Matches a dynamic subdomain. - r.Host("{subdomain:[a-z]+}.domain.com") - -There are several other matchers that can be added. To match path prefixes: - - r.PathPrefix("/products/") - -...or HTTP methods: - - r.Methods("GET", "POST") - -...or URL schemes: - - r.Schemes("https") - -...or header values: - - r.Headers("X-Requested-With", "XMLHttpRequest") - -...or query values: - - r.Queries("key", "value") - -...or to use a custom matcher function: - - r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 - }) - -...and finally, it is possible to combine several matchers in a single route: - - r.HandleFunc("/products", ProductsHandler). - Host("www.example.com"). - Methods("GET"). - Schemes("http") - -Setting the same matching conditions again and again can be boring, so we have -a way to group several routes that share the same requirements. -We call it "subrouting". - -For example, let's say we have several URLs that should only match when the -host is `www.example.com`. 
Create a route for that host and get a "subrouter" -from it: - - r := mux.NewRouter() - s := r.Host("www.example.com").Subrouter() - -Then register routes in the subrouter: - - s.HandleFunc("/products/", ProductsHandler) - s.HandleFunc("/products/{key}", ProductHandler) - s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) - -The three URL paths we registered above will only be tested if the domain is -`www.example.com`, because the subrouter is tested first. This is not -only convenient, but also optimizes request matching. You can create -subrouters combining any attribute matchers accepted by a route. - -Subrouters can be used to create domain or path "namespaces": you define -subrouters in a central place and then parts of the app can register their -paths relative to a given subrouter. - -There's one more thing about subroutes. When a subrouter has a path prefix, -the inner routes use it as the base for their paths: - - r := mux.NewRouter() - s := r.PathPrefix("/products").Subrouter() - // "/products/" - s.HandleFunc("/", ProductsHandler) - // "/products/{key}/" - s.HandleFunc("/{key}/", ProductHandler) - // "/products/{key}/details" - s.HandleFunc("/{key}/details", ProductDetailsHandler) - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, -or "reversed". We define a name by calling Name() on a route. For example: - - r := mux.NewRouter() - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") - -To build a URL, get the route and call the URL() method, passing a sequence of -key/value pairs for the route variables. For the previous route, we would do: - - url, err := r.Get("article").URL("category", "technology", "id", "42") - -...and the result will be a url.URL with the following path: - - "/articles/technology/42" - -This also works for host variables: - - r := mux.NewRouter() - r.Host("{subdomain}.domain.com"). - Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - - // url.String() will be "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") - -All variables defined in the route are required, and their values must -conform to the corresponding patterns. These requirements guarantee that a -generated URL will always match a registered route -- the only exception is -for explicitly defined "build-only" routes which never match. - -Regex support also exists for matching Headers within a route. For example, we could do: - - r.HeadersRegexp("Content-Type", "application/(text|json)") - -...and the route will match both requests with a Content-Type of `application/json` as well as -`application/text` - -There's also a way to build only the URL host or path for a route: -use the methods URLHost() or URLPath() instead. For the previous route, -we would do: - - // "http://news.domain.com/" - host, err := r.Get("article").URLHost("subdomain", "news") - - // "/articles/technology/42" - path, err := r.Get("article").URLPath("category", "technology", "id", "42") - -And if you use subrouters, host and path defined separately can be built -as well: - - r := mux.NewRouter() - s := r.Host("{subdomain}.domain.com").Subrouter() - s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler).
- Name("article") - - // "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") - -## Full Example - -Here's a complete, runnable example of a small mux based server: - -```go -package main - -import ( - "net/http" - - "github.com/gorilla/mux" -) - -func YourHandler(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("Gorilla!\n")) -} - -func main() { - r := mux.NewRouter() - // Routes consist of a path and a handler function. - r.HandleFunc("/", YourHandler) - - // Bind to a port and pass our router in - http.ListenAndServe(":8000", r) -} -``` - -## License - -BSD licensed. See the LICENSE file for details. diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.go deleted file mode 100644 index 49798cb5c..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package gorilla/mux implements a request router and dispatcher. - -The name mux stands for "HTTP request multiplexer". Like the standard -http.ServeMux, mux.Router matches incoming requests against a list of -registered routes and calls a handler for the route that matches the URL -or other conditions. The main features are: - - * Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. - * URL hosts and paths can have variables with an optional regular - expression. - * Registered URLs can be built, or "reversed", which helps maintaining - references to resources. - * Routes can be used as subrouters: nested routes are only tested if the - parent route matches. This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. - * It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. - -Let's start registering a couple of URL paths and handlers: - - func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) - } - -Here we register three routes mapping URL paths to handlers. This is -equivalent to how http.HandleFunc() works: if an incoming request URL matches -one of the paths, the corresponding handler is called passing -(http.ResponseWriter, *http.Request) as parameters. - -Paths can have variables. They are defined using the format {name} or -{name:pattern}. If a regular expression pattern is not defined, the matched -variable will be anything until the next slash. For example: - - r := mux.NewRouter() - r.HandleFunc("/products/{key}", ProductHandler) - r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) - -The names are used to create a map of route variables which can be retrieved -calling mux.Vars(): - - vars := mux.Vars(request) - category := vars["category"] - -And this is all you need to know about the basic usage. More advanced options -are explained below. 
- -Routes can also be restricted to a domain or subdomain. Just define a host -pattern to be matched. They can also have variables: - - r := mux.NewRouter() - // Only matches if domain is "www.example.com". - r.Host("www.example.com") - // Matches a dynamic subdomain. - r.Host("{subdomain:[a-z]+}.domain.com") - -There are several other matchers that can be added. To match path prefixes: - - r.PathPrefix("/products/") - -...or HTTP methods: - - r.Methods("GET", "POST") - -...or URL schemes: - - r.Schemes("https") - -...or header values: - - r.Headers("X-Requested-With", "XMLHttpRequest") - -...or query values: - - r.Queries("key", "value") - -...or to use a custom matcher function: - - r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 - }) - -...and finally, it is possible to combine several matchers in a single route: - - r.HandleFunc("/products", ProductsHandler). - Host("www.example.com"). - Methods("GET"). - Schemes("http") - -Setting the same matching conditions again and again can be boring, so we have -a way to group several routes that share the same requirements. -We call it "subrouting". - -For example, let's say we have several URLs that should only match when the -host is "www.example.com". Create a route for that host and get a "subrouter" -from it: - - r := mux.NewRouter() - s := r.Host("www.example.com").Subrouter() - -Then register routes in the subrouter: - - s.HandleFunc("/products/", ProductsHandler) - s.HandleFunc("/products/{key}", ProductHandler) - s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) - -The three URL paths we registered above will only be tested if the domain is -"www.example.com", because the subrouter is tested first. This is not -only convenient, but also optimizes request matching. You can create -subrouters combining any attribute matchers accepted by a route. - -Subrouters can be used to create domain or path "namespaces": you define -subrouters in a central place and then parts of the app can register their -paths relative to a given subrouter. - -There's one more thing about subroutes. When a subrouter has a path prefix, -the inner routes use it as the base for their paths: - - r := mux.NewRouter() - s := r.PathPrefix("/products").Subrouter() - // "/products/" - s.HandleFunc("/", ProductsHandler) - // "/products/{key}/" - s.HandleFunc("/{key}/", ProductHandler) - // "/products/{key}/details" - s.HandleFunc("/{key}/details", ProductDetailsHandler) - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, -or "reversed". We define a name by calling Name() on a route. For example: - - r := mux.NewRouter() - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") - -To build a URL, get the route and call the URL() method, passing a sequence of -key/value pairs for the route variables. For the previous route, we would do: - - url, err := r.Get("article").URL("category", "technology", "id", "42") - -...and the result will be a url.URL with the following path: - - "/articles/technology/42" - -This also works for host variables: - - r := mux.NewRouter() - r.Host("{subdomain}.domain.com"). - Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler).
- Name("article") - - // url.String() will be "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") - -All variables defined in the route are required, and their values must -conform to the corresponding patterns. These requirements guarantee that a -generated URL will always match a registered route -- the only exception is -for explicitly defined "build-only" routes which never match. - -Regex support also exists for matching Headers within a route. For example, we could do: - - r.HeadersRegexp("Content-Type", "application/(text|json)") - -...and the route will match both requests with a Content-Type of `application/json` as well as -`application/text` - -There's also a way to build only the URL host or path for a route: -use the methods URLHost() or URLPath() instead. For the previous route, -we would do: - - // "http://news.domain.com/" - host, err := r.Get("article").URLHost("subdomain", "news") - - // "/articles/technology/42" - path, err := r.Get("article").URLPath("category", "technology", "id", "42") - -And if you use subrouters, host and path defined separately can be built -as well: - - r := mux.NewRouter() - s := r.Host("{subdomain}.domain.com").Subrouter() - s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - - // "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") -*/ -package mux diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.go deleted file mode 100644 index 21c9d9ae7..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.go +++ /dev/null @@ -1,474 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "errors" - "fmt" - "net/http" - "path" - "regexp" - - "github.com/fsouza/go-dockerclient/external/github.com/gorilla/context" -) - -// NewRouter returns a new router instance. -func NewRouter() *Router { - return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} -} - -// Router registers routes to be matched and dispatches a handler. -// -// It implements the http.Handler interface, so it can be registered to serve -// requests: -// -// var router = mux.NewRouter() -// -// func main() { -// http.Handle("/", router) -// } -// -// Or, for Google App Engine, register it in a init() function: -// -// func init() { -// http.Handle("/", router) -// } -// -// This will send all incoming requests to the router. -type Router struct { - // Configurable Handler to be used when no route matches. - NotFoundHandler http.Handler - // Parent route, if this is a subrouter. - parent parentRoute - // Routes to be matched, in order. - routes []*Route - // Routes by name for URL building. - namedRoutes map[string]*Route - // See Router.StrictSlash(). This defines the flag for new routes. - strictSlash bool - // If true, do not clear the request context after handling the request - KeepContext bool -} - -// Match matches registered routes against the request. 
-func (r *Router) Match(req *http.Request, match *RouteMatch) bool { - for _, route := range r.routes { - if route.Match(req, match) { - return true - } - } - return false -} - -// ServeHTTP dispatches the handler registered in the matched route. -// -// When there is a match, the route variables can be retrieved calling -// mux.Vars(request). -func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { - // Clean path to canonical form and redirect. - if p := cleanPath(req.URL.Path); p != req.URL.Path { - - // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query. - // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: - // http://code.google.com/p/go/issues/detail?id=5252 - url := *req.URL - url.Path = p - p = url.String() - - w.Header().Set("Location", p) - w.WriteHeader(http.StatusMovedPermanently) - return - } - var match RouteMatch - var handler http.Handler - if r.Match(req, &match) { - handler = match.Handler - setVars(req, match.Vars) - setCurrentRoute(req, match.Route) - } - if handler == nil { - handler = r.NotFoundHandler - if handler == nil { - handler = http.NotFoundHandler() - } - } - if !r.KeepContext { - defer context.Clear(req) - } - handler.ServeHTTP(w, req) -} - -// Get returns a route registered with the given name. -func (r *Router) Get(name string) *Route { - return r.getNamedRoutes()[name] -} - -// GetRoute returns a route registered with the given name. This method -// was renamed to Get() and remains here for backwards compatibility. -func (r *Router) GetRoute(name string) *Route { - return r.getNamedRoutes()[name] -} - -// StrictSlash defines the trailing slash behavior for new routes. The initial -// value is false. -// -// When true, if the route path is "/path/", accessing "/path" will redirect -// to the former and vice versa. In other words, your application will always -// see the path as specified in the route. -// -// When false, if the route path is "/path", accessing "/path/" will not match -// this route and vice versa. -// -// Special case: when a route sets a path prefix using the PathPrefix() method, -// strict slash is ignored for that route because the redirect behavior can't -// be determined from a prefix alone. However, any subrouters created from that -// route inherit the original StrictSlash setting. -func (r *Router) StrictSlash(value bool) *Router { - r.strictSlash = value - return r -} - -// ---------------------------------------------------------------------------- -// parentRoute -// ---------------------------------------------------------------------------- - -// getNamedRoutes returns the map where named routes are registered. -func (r *Router) getNamedRoutes() map[string]*Route { - if r.namedRoutes == nil { - if r.parent != nil { - r.namedRoutes = r.parent.getNamedRoutes() - } else { - r.namedRoutes = make(map[string]*Route) - } - } - return r.namedRoutes -} - -// getRegexpGroup returns regexp definitions from the parent route, if any. -func (r *Router) getRegexpGroup() *routeRegexpGroup { - if r.parent != nil { - return r.parent.getRegexpGroup() - } - return nil -} - -func (r *Router) buildVars(m map[string]string) map[string]string { - if r.parent != nil { - m = r.parent.buildVars(m) - } - return m -} - -// ---------------------------------------------------------------------------- -// Route factories -// ---------------------------------------------------------------------------- - -// NewRoute registers an empty route. 
-func (r *Router) NewRoute() *Route {
-	route := &Route{parent: r, strictSlash: r.strictSlash}
-	r.routes = append(r.routes, route)
-	return route
-}
-
-// Handle registers a new route with a matcher for the URL path.
-// See Route.Path() and Route.Handler().
-func (r *Router) Handle(path string, handler http.Handler) *Route {
-	return r.NewRoute().Path(path).Handler(handler)
-}
-
-// HandleFunc registers a new route with a matcher for the URL path.
-// See Route.Path() and Route.HandlerFunc().
-func (r *Router) HandleFunc(path string, f func(http.ResponseWriter,
-	*http.Request)) *Route {
-	return r.NewRoute().Path(path).HandlerFunc(f)
-}
-
-// Headers registers a new route with a matcher for request header values.
-// See Route.Headers().
-func (r *Router) Headers(pairs ...string) *Route {
-	return r.NewRoute().Headers(pairs...)
-}
-
-// Host registers a new route with a matcher for the URL host.
-// See Route.Host().
-func (r *Router) Host(tpl string) *Route {
-	return r.NewRoute().Host(tpl)
-}
-
-// MatcherFunc registers a new route with a custom matcher function.
-// See Route.MatcherFunc().
-func (r *Router) MatcherFunc(f MatcherFunc) *Route {
-	return r.NewRoute().MatcherFunc(f)
-}
-
-// Methods registers a new route with a matcher for HTTP methods.
-// See Route.Methods().
-func (r *Router) Methods(methods ...string) *Route {
-	return r.NewRoute().Methods(methods...)
-}
-
-// Path registers a new route with a matcher for the URL path.
-// See Route.Path().
-func (r *Router) Path(tpl string) *Route {
-	return r.NewRoute().Path(tpl)
-}
-
-// PathPrefix registers a new route with a matcher for the URL path prefix.
-// See Route.PathPrefix().
-func (r *Router) PathPrefix(tpl string) *Route {
-	return r.NewRoute().PathPrefix(tpl)
-}
-
-// Queries registers a new route with a matcher for URL query values.
-// See Route.Queries().
-func (r *Router) Queries(pairs ...string) *Route {
-	return r.NewRoute().Queries(pairs...)
-}
-
-// Schemes registers a new route with a matcher for URL schemes.
-// See Route.Schemes().
-func (r *Router) Schemes(schemes ...string) *Route {
-	return r.NewRoute().Schemes(schemes...)
-}
-
-// BuildVarsFunc registers a new route with a custom function for modifying
-// route variables before building a URL.
-func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {
-	return r.NewRoute().BuildVarsFunc(f)
-}
-
-// Walk walks the router and all its sub-routers, calling walkFn for each route
-// in the tree. The routes are walked in the order they were added. Sub-routers
-// are explored depth-first.
-func (r *Router) Walk(walkFn WalkFunc) error {
-	return r.walk(walkFn, []*Route{})
-}
-
-// SkipRouter is used as a return value from WalkFuncs to indicate that the
-// router that walk is about to descend into should be skipped.
-var SkipRouter = errors.New("skip this router")
-
-// WalkFunc is the type of the function called for each route visited by Walk.
-// At every invocation, it is given the current route and router, along with a
-// list of ancestor routes that lead to the current route.
-type WalkFunc func(route *Route, router *Router, ancestors []*Route) error - -func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { - for _, t := range r.routes { - if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" { - continue - } - - err := walkFn(t, r, ancestors) - if err == SkipRouter { - continue - } - for _, sr := range t.matchers { - if h, ok := sr.(*Router); ok { - err := h.walk(walkFn, ancestors) - if err != nil { - return err - } - } - } - if h, ok := t.handler.(*Router); ok { - ancestors = append(ancestors, t) - err := h.walk(walkFn, ancestors) - if err != nil { - return err - } - ancestors = ancestors[:len(ancestors)-1] - } - } - return nil -} - -// ---------------------------------------------------------------------------- -// Context -// ---------------------------------------------------------------------------- - -// RouteMatch stores information about a matched route. -type RouteMatch struct { - Route *Route - Handler http.Handler - Vars map[string]string -} - -type contextKey int - -const ( - varsKey contextKey = iota - routeKey -) - -// Vars returns the route variables for the current request, if any. -func Vars(r *http.Request) map[string]string { - if rv := context.Get(r, varsKey); rv != nil { - return rv.(map[string]string) - } - return nil -} - -// CurrentRoute returns the matched route for the current request, if any. -// This only works when called inside the handler of the matched route -// because the matched route is stored in the request context which is cleared -// after the handler returns, unless the KeepContext option is set on the -// Router. -func CurrentRoute(r *http.Request) *Route { - if rv := context.Get(r, routeKey); rv != nil { - return rv.(*Route) - } - return nil -} - -func setVars(r *http.Request, val interface{}) { - context.Set(r, varsKey, val) -} - -func setCurrentRoute(r *http.Request, val interface{}) { - context.Set(r, routeKey, val) -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -// cleanPath returns the canonical path for p, eliminating . and .. elements. -// Borrowed from the net/http package. -func cleanPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root; - // put the trailing slash back if necessary. - if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - return np -} - -// uniqueVars returns an error if two slices contain duplicated strings. -func uniqueVars(s1, s2 []string) error { - for _, v1 := range s1 { - for _, v2 := range s2 { - if v1 == v2 { - return fmt.Errorf("mux: duplicated route variable %q", v2) - } - } - } - return nil -} - -// checkPairs returns the count of strings passed in, and an error if -// the count is not an even number. -func checkPairs(pairs ...string) (int, error) { - length := len(pairs) - if length%2 != 0 { - return length, fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - } - return length, nil -} - -// mapFromPairsToString converts variadic string parameters to a -// string to string map. -func mapFromPairsToString(pairs ...string) (map[string]string, error) { - length, err := checkPairs(pairs...) 
-	if err != nil {
-		return nil, err
-	}
-	m := make(map[string]string, length/2)
-	for i := 0; i < length; i += 2 {
-		m[pairs[i]] = pairs[i+1]
-	}
-	return m, nil
-}
-
-// mapFromPairsToRegex converts variadic string parameters to a
-// string to regex map.
-func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
-	length, err := checkPairs(pairs...)
-	if err != nil {
-		return nil, err
-	}
-	m := make(map[string]*regexp.Regexp, length/2)
-	for i := 0; i < length; i += 2 {
-		regex, err := regexp.Compile(pairs[i+1])
-		if err != nil {
-			return nil, err
-		}
-		m[pairs[i]] = regex
-	}
-	return m, nil
-}
-
-// matchInArray returns true if the given string value is in the array.
-func matchInArray(arr []string, value string) bool {
-	for _, v := range arr {
-		if v == value {
-			return true
-		}
-	}
-	return false
-}
-
-// matchMapWithString returns true if the given key/value pairs exist in a given map.
-func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool {
-	for k, v := range toCheck {
-		// Check if key exists.
-		if canonicalKey {
-			k = http.CanonicalHeaderKey(k)
-		}
-		if values := toMatch[k]; values == nil {
-			return false
-		} else if v != "" {
-			// If the value was defined as an empty string we only check that the
-			// key exists. Otherwise we also check for equality.
-			valueExists := false
-			for _, value := range values {
-				if v == value {
-					valueExists = true
-					break
-				}
-			}
-			if !valueExists {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-// matchMapWithRegex returns true if the given key/value pairs exist in a given map,
-// with the values matched against the given regexps.
-func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool {
-	for k, v := range toCheck {
-		// Check if key exists.
-		if canonicalKey {
-			k = http.CanonicalHeaderKey(k)
-		}
-		if values := toMatch[k]; values == nil {
-			return false
-		} else if v != nil {
-			// If the regexp was defined as nil we only check that the key
-			// exists. Otherwise we also check that some value matches it.
-			valueExists := false
-			for _, value := range values {
-				if v.MatchString(value) {
-					valueExists = true
-					break
-				}
-			}
-			if !valueExists {
-				return false
-			}
-		}
-	}
-	return true
-}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.go
deleted file mode 100644
index 06728dd54..000000000
--- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.go
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2012 The Gorilla Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mux
-
-import (
-	"bytes"
-	"fmt"
-	"net/http"
-	"net/url"
-	"regexp"
-	"strconv"
-	"strings"
-)
-
-// newRouteRegexp parses a route template and returns a routeRegexp,
-// used to match a host, a path or a query string.
-//
-// It will extract named variables, assemble a regexp to be matched, create
-// a "reverse" template to build URLs and compile regexps to validate variable
-// values used in URL building.
-//
-// Previously we accepted only Python-like identifiers for variable
-// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
-// name and pattern can't be empty, and names can't contain a colon.
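-//
-// As an illustrative sketch (not part of the original docs): for the
-// template "/articles/{category}/{id:[0-9]+}" this produces the pattern
-// "^/articles/(?P<v0>[^/]+)/(?P<v1>[0-9]+)$" and the reverse template
-// "/articles/%s/%s".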
-func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) {
-	// Check if it is well-formed.
-	idxs, errBraces := braceIndices(tpl)
-	if errBraces != nil {
-		return nil, errBraces
-	}
-	// Backup the original.
-	template := tpl
-	// Now let's parse it.
-	defaultPattern := "[^/]+"
-	if matchQuery {
-		defaultPattern = "[^?&]*"
-	} else if matchHost {
-		defaultPattern = "[^.]+"
-		matchPrefix = false
-	}
-	// Only apply strict slash when matching a full path, not a prefix,
-	// host or query.
-	if matchPrefix || matchHost || matchQuery {
-		strictSlash = false
-	}
-	// Set a flag for strictSlash.
-	endSlash := false
-	if strictSlash && strings.HasSuffix(tpl, "/") {
-		tpl = tpl[:len(tpl)-1]
-		endSlash = true
-	}
-	varsN := make([]string, len(idxs)/2)
-	varsR := make([]*regexp.Regexp, len(idxs)/2)
-	pattern := bytes.NewBufferString("")
-	pattern.WriteByte('^')
-	reverse := bytes.NewBufferString("")
-	var end int
-	var err error
-	for i := 0; i < len(idxs); i += 2 {
-		// Set all values we are interested in.
-		raw := tpl[end:idxs[i]]
-		end = idxs[i+1]
-		parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
-		name := parts[0]
-		patt := defaultPattern
-		if len(parts) == 2 {
-			patt = parts[1]
-		}
-		// Name or pattern can't be empty.
-		if name == "" || patt == "" {
-			return nil, fmt.Errorf("mux: missing name or pattern in %q",
-				tpl[idxs[i]:end])
-		}
-		// Build the regexp pattern.
-		varIdx := i / 2
-		fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(varIdx), patt)
-		// Build the reverse template.
-		fmt.Fprintf(reverse, "%s%%s", raw)
-
-		// Append variable name and compiled pattern.
-		varsN[varIdx] = name
-		varsR[varIdx], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
-		if err != nil {
-			return nil, err
-		}
-	}
-	// Add the remaining.
-	raw := tpl[end:]
-	pattern.WriteString(regexp.QuoteMeta(raw))
-	if strictSlash {
-		pattern.WriteString("[/]?")
-	}
-	if matchQuery {
-		// Add the default pattern if the query value is empty.
-		if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
-			pattern.WriteString(defaultPattern)
-		}
-	}
-	if !matchPrefix {
-		pattern.WriteByte('$')
-	}
-	reverse.WriteString(raw)
-	if endSlash {
-		reverse.WriteByte('/')
-	}
-	// Compile full regexp.
-	reg, errCompile := regexp.Compile(pattern.String())
-	if errCompile != nil {
-		return nil, errCompile
-	}
-	// Done!
-	return &routeRegexp{
-		template:    template,
-		matchHost:   matchHost,
-		matchQuery:  matchQuery,
-		strictSlash: strictSlash,
-		regexp:      reg,
-		reverse:     reverse.String(),
-		varsN:       varsN,
-		varsR:       varsR,
-	}, nil
-}
-
-// routeRegexp stores a regexp to match a host or path and information to
-// collect and validate route variables.
-type routeRegexp struct {
-	// The unmodified template.
-	template string
-	// True for host match, false for path or query string match.
-	matchHost bool
-	// True for query string match, false for path and host match.
-	matchQuery bool
-	// The strictSlash value defined on the route, but disabled if PathPrefix was used.
-	strictSlash bool
-	// Expanded regexp.
-	regexp *regexp.Regexp
-	// Reverse template.
-	reverse string
-	// Variable names.
-	varsN []string
-	// Variable regexps (validators).
-	varsR []*regexp.Regexp
-}
-
-// Match matches the regexp against the URL host or path.
-func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { - if !r.matchHost { - if r.matchQuery { - return r.matchQueryString(req) - } else { - return r.regexp.MatchString(req.URL.Path) - } - } - return r.regexp.MatchString(getHost(req)) -} - -// url builds a URL part using the given values. -func (r *routeRegexp) url(values map[string]string) (string, error) { - urlValues := make([]interface{}, len(r.varsN)) - for k, v := range r.varsN { - value, ok := values[v] - if !ok { - return "", fmt.Errorf("mux: missing route variable %q", v) - } - urlValues[k] = value - } - rv := fmt.Sprintf(r.reverse, urlValues...) - if !r.regexp.MatchString(rv) { - // The URL is checked against the full regexp, instead of checking - // individual variables. This is faster but to provide a good error - // message, we check individual regexps if the URL doesn't match. - for k, v := range r.varsN { - if !r.varsR[k].MatchString(values[v]) { - return "", fmt.Errorf( - "mux: variable %q doesn't match, expected %q", values[v], - r.varsR[k].String()) - } - } - } - return rv, nil -} - -// getUrlQuery returns a single query parameter from a request URL. -// For a URL with foo=bar&baz=ding, we return only the relevant key -// value pair for the routeRegexp. -func (r *routeRegexp) getUrlQuery(req *http.Request) string { - if !r.matchQuery { - return "" - } - templateKey := strings.SplitN(r.template, "=", 2)[0] - for key, vals := range req.URL.Query() { - if key == templateKey && len(vals) > 0 { - return key + "=" + vals[0] - } - } - return "" -} - -func (r *routeRegexp) matchQueryString(req *http.Request) bool { - return r.regexp.MatchString(r.getUrlQuery(req)) -} - -// braceIndices returns the first level curly brace indices from a string. -// It returns an error in case of unbalanced braces. -func braceIndices(s string) ([]int, error) { - var level, idx int - idxs := make([]int, 0) - for i := 0; i < len(s); i++ { - switch s[i] { - case '{': - if level++; level == 1 { - idx = i - } - case '}': - if level--; level == 0 { - idxs = append(idxs, idx, i+1) - } else if level < 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - } - } - if level != 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - return idxs, nil -} - -// varGroupName builds a capturing group name for the indexed variable. -func varGroupName(idx int) string { - return "v" + strconv.Itoa(idx) -} - -// ---------------------------------------------------------------------------- -// routeRegexpGroup -// ---------------------------------------------------------------------------- - -// routeRegexpGroup groups the route matchers that carry variables. -type routeRegexpGroup struct { - host *routeRegexp - path *routeRegexp - queries []*routeRegexp -} - -// setMatch extracts the variables from the URL once a route matches. -func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { - // Store host variables. - if v.host != nil { - hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) - if hostVars != nil { - subexpNames := v.host.regexp.SubexpNames() - varName := 0 - for i, name := range subexpNames[1:] { - if name != "" && name == varGroupName(varName) { - m.Vars[v.host.varsN[varName]] = hostVars[i+1] - varName++ - } - } - } - } - // Store path variables. 
- if v.path != nil { - pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) - if pathVars != nil { - subexpNames := v.path.regexp.SubexpNames() - varName := 0 - for i, name := range subexpNames[1:] { - if name != "" && name == varGroupName(varName) { - m.Vars[v.path.varsN[varName]] = pathVars[i+1] - varName++ - } - } - // Check if we should redirect. - if v.path.strictSlash { - p1 := strings.HasSuffix(req.URL.Path, "/") - p2 := strings.HasSuffix(v.path.template, "/") - if p1 != p2 { - u, _ := url.Parse(req.URL.String()) - if p1 { - u.Path = u.Path[:len(u.Path)-1] - } else { - u.Path += "/" - } - m.Handler = http.RedirectHandler(u.String(), 301) - } - } - } - } - // Store query string variables. - for _, q := range v.queries { - queryVars := q.regexp.FindStringSubmatch(q.getUrlQuery(req)) - if queryVars != nil { - subexpNames := q.regexp.SubexpNames() - varName := 0 - for i, name := range subexpNames[1:] { - if name != "" && name == varGroupName(varName) { - m.Vars[q.varsN[varName]] = queryVars[i+1] - varName++ - } - } - } - } -} - -// getHost tries its best to return the request host. -func getHost(r *http.Request) string { - if r.URL.IsAbs() { - return r.URL.Host - } - host := r.Host - // Slice off any port information. - if i := strings.Index(host, ":"); i != -1 { - host = host[:i] - } - return host - -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.go deleted file mode 100644 index 890130460..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.go +++ /dev/null @@ -1,603 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "regexp" - "strings" -) - -// Route stores information to match a request and build URLs. -type Route struct { - // Parent where the route was registered (a Router). - parent parentRoute - // Request handler for the route. - handler http.Handler - // List of matchers. - matchers []matcher - // Manager for the variables from host and path. - regexp *routeRegexpGroup - // If true, when the path pattern is "/path/", accessing "/path" will - // redirect to the former and vice versa. - strictSlash bool - // If true, this route never matches: it is only used to build URLs. - buildOnly bool - // The name used to build URLs. - name string - // Error resulted from building a route. - err error - - buildVarsFunc BuildVarsFunc -} - -// Match matches the route against the request. -func (r *Route) Match(req *http.Request, match *RouteMatch) bool { - if r.buildOnly || r.err != nil { - return false - } - // Match everything. - for _, m := range r.matchers { - if matched := m.Match(req, match); !matched { - return false - } - } - // Yay, we have a match. Let's collect some info about it. - if match.Route == nil { - match.Route = r - } - if match.Handler == nil { - match.Handler = r.handler - } - if match.Vars == nil { - match.Vars = make(map[string]string) - } - // Set variables. 
-	if r.regexp != nil {
-		r.regexp.setMatch(req, match, r)
-	}
-	return true
-}
-
-// ----------------------------------------------------------------------------
-// Route attributes
-// ----------------------------------------------------------------------------
-
-// GetError returns the error that resulted from building the route, if any.
-func (r *Route) GetError() error {
-	return r.err
-}
-
-// BuildOnly sets the route to never match: it is only used to build URLs.
-func (r *Route) BuildOnly() *Route {
-	r.buildOnly = true
-	return r
-}
-
-// Handler --------------------------------------------------------------------
-
-// Handler sets a handler for the route.
-func (r *Route) Handler(handler http.Handler) *Route {
-	if r.err == nil {
-		r.handler = handler
-	}
-	return r
-}
-
-// HandlerFunc sets a handler function for the route.
-func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {
-	return r.Handler(http.HandlerFunc(f))
-}
-
-// GetHandler returns the handler for the route, if any.
-func (r *Route) GetHandler() http.Handler {
-	return r.handler
-}
-
-// Name -----------------------------------------------------------------------
-
-// Name sets the name for the route, used to build URLs.
-// If the name was registered already it will be overwritten.
-func (r *Route) Name(name string) *Route {
-	if r.name != "" {
-		r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
-			r.name, name)
-	}
-	if r.err == nil {
-		r.name = name
-		r.getNamedRoutes()[name] = r
-	}
-	return r
-}
-
-// GetName returns the name for the route, if any.
-func (r *Route) GetName() string {
-	return r.name
-}
-
-// ----------------------------------------------------------------------------
-// Matchers
-// ----------------------------------------------------------------------------
-
-// matcher types try to match a request.
-type matcher interface {
-	Match(*http.Request, *RouteMatch) bool
-}
-
-// addMatcher adds a matcher to the route.
-func (r *Route) addMatcher(m matcher) *Route {
-	if r.err == nil {
-		r.matchers = append(r.matchers, m)
-	}
-	return r
-}
-
-// addRegexpMatcher adds a host or path matcher and builder to a route.
-func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error {
-	if r.err != nil {
-		return r.err
-	}
-	r.regexp = r.getRegexpGroup()
-	if !matchHost && !matchQuery {
-		if len(tpl) == 0 || tpl[0] != '/' {
-			return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
-		}
-		if r.regexp.path != nil {
-			tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
-		}
-	}
-	rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash)
-	if err != nil {
-		return err
-	}
-	for _, q := range r.regexp.queries {
-		if err = uniqueVars(rr.varsN, q.varsN); err != nil {
-			return err
-		}
-	}
-	if matchHost {
-		if r.regexp.path != nil {
-			if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
-				return err
-			}
-		}
-		r.regexp.host = rr
-	} else {
-		if r.regexp.host != nil {
-			if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
-				return err
-			}
-		}
-		if matchQuery {
-			r.regexp.queries = append(r.regexp.queries, rr)
-		} else {
-			r.regexp.path = rr
-		}
-	}
-	r.addMatcher(rr)
-	return nil
-}
-
-// Headers --------------------------------------------------------------------
-
-// headerMatcher matches the request against header values.
-type headerMatcher map[string]string
-
-func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
-	return matchMapWithString(m, r.Header, true)
-}
-
-// Headers adds a matcher for request header values.
-// It accepts a sequence of key/value pairs to be matched. For example:
-//
-//     r := mux.NewRouter()
-//     r.Headers("Content-Type", "application/json",
-//               "X-Requested-With", "XMLHttpRequest")
-//
-// The above route will only match if both request header values match.
-// Alternatively, you can provide a regular expression and match the header as follows:
-//
-//     r.Headers("Content-Type", "application/(text|json)",
-//               "X-Requested-With", "XMLHttpRequest")
-//
-// The above route will match the same as the previous example, with the
-// addition of matching application/text as well.
-//
-// If the value is an empty string, it will match any value if the key is set.
-func (r *Route) Headers(pairs ...string) *Route {
-	if r.err == nil {
-		var headers map[string]string
-		headers, r.err = mapFromPairsToString(pairs...)
-		return r.addMatcher(headerMatcher(headers))
-	}
-	return r
-}
-
-// headerRegexMatcher matches the request against the route given a regex for the header
-type headerRegexMatcher map[string]*regexp.Regexp
-
-func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
-	return matchMapWithRegex(m, r.Header, true)
-}
-
-// HeadersRegexp adds a matcher for request header values with regex support.
-// It accepts a sequence of key/value pairs, where the value may be a regular
-// expression. For example:
-//
-//     r := mux.NewRouter()
-//     r.HeadersRegexp("Content-Type", "application/(text|json)",
-//               "X-Requested-With", "XMLHttpRequest")
-//
-// The above route will only match if both request header values match their
-// regular expressions.
-// If the value is an empty string, it will match any value if the key is set.
-func (r *Route) HeadersRegexp(pairs ...string) *Route {
-	if r.err == nil {
-		var headers map[string]*regexp.Regexp
-		headers, r.err = mapFromPairsToRegex(pairs...)
-		return r.addMatcher(headerRegexMatcher(headers))
-	}
-	return r
-}
-
-// Host -----------------------------------------------------------------------
-
-// Host adds a matcher for the URL host.
-// It accepts a template with zero or more URL variables enclosed by {}.
-// Variables can define an optional regexp pattern to be matched:
-//
-// - {name} matches anything until the next dot.
-//
-// - {name:pattern} matches the given regexp pattern.
-//
-// For example:
-//
-//     r := mux.NewRouter()
-//     r.Host("www.example.com")
-//     r.Host("{subdomain}.domain.com")
-//     r.Host("{subdomain:[a-z]+}.domain.com")
-//
-// Variable names must be unique in a given route. They can be retrieved
-// calling mux.Vars(request).
-func (r *Route) Host(tpl string) *Route {
-	r.err = r.addRegexpMatcher(tpl, true, false, false)
-	return r
-}
-
-// MatcherFunc ----------------------------------------------------------------
-
-// MatcherFunc is the function signature used by custom matchers.
-type MatcherFunc func(*http.Request, *RouteMatch) bool
-
-func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
-	return m(r, match)
-}
-
-// MatcherFunc adds a custom function to be used as request matcher.
-func (r *Route) MatcherFunc(f MatcherFunc) *Route {
-	return r.addMatcher(f)
-}
-
-// Methods --------------------------------------------------------------------
-
-// methodMatcher matches the request against HTTP methods.
-type methodMatcher []string
-
-func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
-	return matchInArray(m, r.Method)
-}
-
-// Methods adds a matcher for HTTP methods.
-// It accepts a sequence of one or more methods to be matched, e.g.:
-// "GET", "POST", "PUT".
-func (r *Route) Methods(methods ...string) *Route {
-	for k, v := range methods {
-		methods[k] = strings.ToUpper(v)
-	}
-	return r.addMatcher(methodMatcher(methods))
-}
-
-// Path -----------------------------------------------------------------------
-
-// Path adds a matcher for the URL path.
-// It accepts a template with zero or more URL variables enclosed by {}. The
-// template must start with a "/".
-// Variables can define an optional regexp pattern to be matched:
-//
-// - {name} matches anything until the next slash.
-//
-// - {name:pattern} matches the given regexp pattern.
-//
-// For example:
-//
-//     r := mux.NewRouter()
-//     r.Path("/products/").Handler(ProductsHandler)
-//     r.Path("/products/{key}").Handler(ProductsHandler)
-//     r.Path("/articles/{category}/{id:[0-9]+}").
-//       Handler(ArticleHandler)
-//
-// Variable names must be unique in a given route. They can be retrieved
-// calling mux.Vars(request).
-func (r *Route) Path(tpl string) *Route {
-	r.err = r.addRegexpMatcher(tpl, false, false, false)
-	return r
-}
-
-// PathPrefix -----------------------------------------------------------------
-
-// PathPrefix adds a matcher for the URL path prefix. This matches if the given
-// template is a prefix of the full URL path. See Route.Path() for details on
-// the tpl argument.
-//
-// Note that it does not treat slashes specially ("/foobar/" will be matched by
-// the prefix "/foo") so you may want to use a trailing slash here.
-//
-// Also note that the setting of Router.StrictSlash() has no effect on routes
-// with a PathPrefix matcher.
-func (r *Route) PathPrefix(tpl string) *Route {
-	r.err = r.addRegexpMatcher(tpl, false, true, false)
-	return r
-}
-
-// Query ----------------------------------------------------------------------
-
-// Queries adds a matcher for URL query values.
-// It accepts a sequence of key/value pairs. Values may define variables.
-// For example:
-//
-//     r := mux.NewRouter()
-//     r.Queries("foo", "bar", "id", "{id:[0-9]+}")
-//
-// The above route will only match if the URL contains the defined query
-// values, e.g.: ?foo=bar&id=42.
-//
-// If the value is an empty string, it will match any value if the key is set.
-//
-// Variables can define an optional regexp pattern to be matched:
-//
-// - {name} matches anything except '?' and '&'.
-//
-// - {name:pattern} matches the given regexp pattern.
-func (r *Route) Queries(pairs ...string) *Route {
-	length := len(pairs)
-	if length%2 != 0 {
-		r.err = fmt.Errorf(
-			"mux: number of parameters must be multiple of 2, got %v", pairs)
-		return nil
-	}
-	for i := 0; i < length; i += 2 {
-		if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil {
-			return r
-		}
-	}
-
-	return r
-}
-
-// Schemes --------------------------------------------------------------------
-
-// schemeMatcher matches the request against URL schemes.
-type schemeMatcher []string
-
-func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
-	return matchInArray(m, r.URL.Scheme)
-}
-
-// Schemes adds a matcher for URL schemes.
-// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
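-//
-// For example (a sketch added for illustration):
-//
-//     r.Schemes("https").Host("www.example.com")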
-func (r *Route) Schemes(schemes ...string) *Route {
-	for k, v := range schemes {
-		schemes[k] = strings.ToLower(v)
-	}
-	return r.addMatcher(schemeMatcher(schemes))
-}
-
-// BuildVarsFunc --------------------------------------------------------------
-
-// BuildVarsFunc is the function signature used by custom build variable
-// functions (which can modify route variables before a route's URL is built).
-type BuildVarsFunc func(map[string]string) map[string]string
-
-// BuildVarsFunc adds a custom function to be used to modify build variables
-// before a route's URL is built.
-func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
-	r.buildVarsFunc = f
-	return r
-}
-
-// Subrouter ------------------------------------------------------------------
-
-// Subrouter creates a subrouter for the route.
-//
-// It will test the inner routes only if the parent route matched. For example:
-//
-//     r := mux.NewRouter()
-//     s := r.Host("www.example.com").Subrouter()
-//     s.HandleFunc("/products/", ProductsHandler)
-//     s.HandleFunc("/products/{key}", ProductHandler)
-//     s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
-//
-// Here, the routes registered in the subrouter won't be tested if the host
-// doesn't match.
-func (r *Route) Subrouter() *Router {
-	router := &Router{parent: r, strictSlash: r.strictSlash}
-	r.addMatcher(router)
-	return router
-}
-
-// ----------------------------------------------------------------------------
-// URL building
-// ----------------------------------------------------------------------------
-
-// URL builds a URL for the route.
-//
-// It accepts a sequence of key/value pairs for the route variables. For
-// example, given this route:
-//
-//     r := mux.NewRouter()
-//     r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
-//       Name("article")
-//
-// ...a URL for it can be built using:
-//
-//     url, err := r.Get("article").URL("category", "technology", "id", "42")
-//
-// ...which will return a url.URL with the following path:
-//
-//     "/articles/technology/42"
-//
-// This also works for host variables:
-//
-//     r := mux.NewRouter()
-//     r.Host("{subdomain}.domain.com").
-//       HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
-//       Name("article")
-//
-//     // url.String() will be "http://news.domain.com/articles/technology/42"
-//     url, err := r.Get("article").URL("subdomain", "news",
-//                                      "category", "technology",
-//                                      "id", "42")
-//
-// All variables defined in the route are required, and their values must
-// conform to the corresponding patterns.
-func (r *Route) URL(pairs ...string) (*url.URL, error) {
-	if r.err != nil {
-		return nil, r.err
-	}
-	if r.regexp == nil {
-		return nil, errors.New("mux: route doesn't have a host or path")
-	}
-	values, err := r.prepareVars(pairs...)
-	if err != nil {
-		return nil, err
-	}
-	var scheme, host, path string
-	if r.regexp.host != nil {
-		// Set a default scheme.
-		scheme = "http"
-		if host, err = r.regexp.host.url(values); err != nil {
-			return nil, err
-		}
-	}
-	if r.regexp.path != nil {
-		if path, err = r.regexp.path.url(values); err != nil {
-			return nil, err
-		}
-	}
-	return &url.URL{
-		Scheme: scheme,
-		Host:   host,
-		Path:   path,
-	}, nil
-}
-
-// URLHost builds the host part of the URL for a route. See Route.URL().
-//
-// The route must have a host defined.
-func (r *Route) URLHost(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil || r.regexp.host == nil { - return nil, errors.New("mux: route doesn't have a host") - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - host, err := r.regexp.host.url(values) - if err != nil { - return nil, err - } - return &url.URL{ - Scheme: "http", - Host: host, - }, nil -} - -// URLPath builds the path part of the URL for a route. See Route.URL(). -// -// The route must have a path defined. -func (r *Route) URLPath(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil || r.regexp.path == nil { - return nil, errors.New("mux: route doesn't have a path") - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - path, err := r.regexp.path.url(values) - if err != nil { - return nil, err - } - return &url.URL{ - Path: path, - }, nil -} - -// prepareVars converts the route variable pairs into a map. If the route has a -// BuildVarsFunc, it is invoked. -func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { - m, err := mapFromPairsToString(pairs...) - if err != nil { - return nil, err - } - return r.buildVars(m), nil -} - -func (r *Route) buildVars(m map[string]string) map[string]string { - if r.parent != nil { - m = r.parent.buildVars(m) - } - if r.buildVarsFunc != nil { - m = r.buildVarsFunc(m) - } - return m -} - -// ---------------------------------------------------------------------------- -// parentRoute -// ---------------------------------------------------------------------------- - -// parentRoute allows routes to know about parent host and path definitions. -type parentRoute interface { - getNamedRoutes() map[string]*Route - getRegexpGroup() *routeRegexpGroup - buildVars(map[string]string) map[string]string -} - -// getNamedRoutes returns the map where named routes are registered. -func (r *Route) getNamedRoutes() map[string]*Route { - if r.parent == nil { - // During tests router is not always set. - r.parent = NewRouter() - } - return r.parent.getNamedRoutes() -} - -// getRegexpGroup returns regexp definitions from this route. -func (r *Route) getRegexpGroup() *routeRegexpGroup { - if r.regexp == nil { - if r.parent == nil { - // During tests router is not always set. - r.parent = NewRouter() - } - regexp := r.parent.getRegexpGroup() - if regexp == nil { - r.regexp = new(routeRegexpGroup) - } else { - // Copy. - r.regexp = &routeRegexpGroup{ - host: regexp.host, - path: regexp.path, - queries: regexp.queries, - } - } - } - return r.regexp -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/LICENSE deleted file mode 100644 index e87a115e4..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. 
"Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. 
Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. 
If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/README.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/README.md deleted file mode 100644 index 036e5313f..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# cleanhttp - -Functions for accessing "clean" Go http.Client values - -------------- - -The Go standard library contains a default `http.Client` called -`http.DefaultClient`. It is a common idiom in Go code to start with -`http.DefaultClient` and tweak it as necessary, and in fact, this is -encouraged; from the `http` package documentation: - -> The Client's Transport typically has internal state (cached TCP connections), -so Clients should be reused instead of created as needed. Clients are safe for -concurrent use by multiple goroutines. - -Unfortunately, this is a shared value, and it is not uncommon for libraries to -assume that they are free to modify it at will. With enough dependencies, it -can be very easy to encounter strange problems and race conditions due to -manipulation of this shared value across libraries and goroutines (clients are -safe for concurrent use, but writing values to the client struct itself is not -protected). - -Making things worse is the fact that a bare `http.Client` will use a default -`http.Transport` called `http.DefaultTransport`, which is another global value -that behaves the same way. So it is not simply enough to replace -`http.DefaultClient` with `&http.Client{}`. - -This repository provides some simple functions to get a "clean" `http.Client` --- one that uses the same default values as the Go standard library, but -returns a client that does not share any state with other clients. 
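-
-As a minimal usage sketch (not from the upstream README; the request URL is
-hypothetical), replacing a bare `http.Client` looks like:
-
-    client := cleanhttp.DefaultClient()
-    resp, err := client.Get("https://example.com/")
-    if err != nil {
-        // handle the error
-    }
-    defer resp.Body.Close()
-
-Because the returned client owns its own `http.Transport`, tweaking its
-fields cannot race with other packages that use `http.DefaultClient`.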
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/cleanhttp.go deleted file mode 100644 index 1676d79cb..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/cleanhttp.go +++ /dev/null @@ -1,28 +0,0 @@ -package cleanhttp - -import ( - "net" - "net/http" - "time" -) - -// DefaultTransport returns a new http.Transport with the same default values -// as http.DefaultTransport -func DefaultTransport() *http.Transport { - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - } -} - -// DefaultClient returns a new http.Client with the same default values as -// http.Client, but with a non-shared Transport -func DefaultClient() *http.Client { - return &http.Client{ - Transport: DefaultTransport(), - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS deleted file mode 100644 index edbe20066..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS +++ /dev/null @@ -1,2 +0,0 @@ -Tianon Gravi (@tianon) -Aleksa Sarai (@cyphar) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup.go deleted file mode 100644 index 6f8a982ff..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup.go +++ /dev/null @@ -1,108 +0,0 @@ -package user - -import ( - "errors" - "fmt" - "syscall" -) - -var ( - // The current operating system does not provide the required data for user lookups. - ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data") -) - -func lookupUser(filter func(u User) bool) (User, error) { - // Get operating system-specific passwd reader-closer. - passwd, err := GetPasswd() - if err != nil { - return User{}, err - } - defer passwd.Close() - - // Get the users. - users, err := ParsePasswdFilter(passwd, filter) - if err != nil { - return User{}, err - } - - // No user entries found. - if len(users) == 0 { - return User{}, fmt.Errorf("no matching entries in passwd file") - } - - // Assume the first entry is the "correct" one. - return users[0], nil -} - -// CurrentUser looks up the current user by their user id in /etc/passwd. If the -// user cannot be found (or there is no /etc/passwd file on the filesystem), -// then CurrentUser returns an error. -func CurrentUser() (User, error) { - return LookupUid(syscall.Getuid()) -} - -// LookupUser looks up a user by their username in /etc/passwd. If the user -// cannot be found (or there is no /etc/passwd file on the filesystem), then -// LookupUser returns an error. -func LookupUser(username string) (User, error) { - return lookupUser(func(u User) bool { - return u.Name == username - }) -} - -// LookupUid looks up a user by their user id in /etc/passwd. 
If the user cannot -// be found (or there is no /etc/passwd file on the filesystem), then LookupUid -// returns an error. -func LookupUid(uid int) (User, error) { - return lookupUser(func(u User) bool { - return u.Uid == uid - }) -} - -func lookupGroup(filter func(g Group) bool) (Group, error) { - // Get operating system-specific group reader-closer. - group, err := GetGroup() - if err != nil { - return Group{}, err - } - defer group.Close() - - // Get the groups. - groups, err := ParseGroupFilter(group, filter) - if err != nil { - return Group{}, err - } - - // No group entries found. - if len(groups) == 0 { - return Group{}, fmt.Errorf("no matching entries in group file") - } - - // Assume the first entry is the "correct" one. - return groups[0], nil -} - -// CurrentGroup looks up the current user's group by their primary group id's -// entry in /etc/group. If the group cannot be found (or there is no -// /etc/group file on the filesystem), then CurrentGroup returns an error. -func CurrentGroup() (Group, error) { - return LookupGid(syscall.Getgid()) -} - -// LookupGroup looks up a group by its name in /etc/group. If the group cannot -// be found (or there is no /etc/group file on the filesystem), then LookupGroup -// returns an error. -func LookupGroup(groupname string) (Group, error) { - return lookupGroup(func(g Group) bool { - return g.Name == groupname - }) -} - -// LookupGid looks up a group by its group id in /etc/group. If the group cannot -// be found (or there is no /etc/group file on the filesystem), then LookupGid -// returns an error. -func LookupGid(gid int) (Group, error) { - return lookupGroup(func(g Group) bool { - return g.Gid == gid - }) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go deleted file mode 100644 index 758b734c2..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package user - -import ( - "io" - "os" -) - -// Unix-specific paths to the passwd and group formatted files.
-const ( - unixPasswdPath = "/etc/passwd" - unixGroupPath = "/etc/group" -) - -func GetPasswdPath() (string, error) { - return unixPasswdPath, nil -} - -func GetPasswd() (io.ReadCloser, error) { - return os.Open(unixPasswdPath) -} - -func GetGroupPath() (string, error) { - return unixGroupPath, nil -} - -func GetGroup() (io.ReadCloser, error) { - return os.Open(unixGroupPath) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go deleted file mode 100644 index 721794887..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris - -package user - -import "io" - -func GetPasswdPath() (string, error) { - return "", ErrUnsupported -} - -func GetPasswd() (io.ReadCloser, error) { - return nil, ErrUnsupported -} - -func GetGroupPath() (string, error) { - return "", ErrUnsupported -} - -func GetGroup() (io.ReadCloser, error) { - return nil, ErrUnsupported -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user.go deleted file mode 100644 index e6375ea4d..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user.go +++ /dev/null @@ -1,418 +0,0 @@ -package user - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -const ( - minId = 0 - maxId = 1<<31 - 1 //for 32-bit systems compatibility -) - -var ( - ErrRange = fmt.Errorf("Uids and gids must be in range %d-%d", minId, maxId) -) - -type User struct { - Name string - Pass string - Uid int - Gid int - Gecos string - Home string - Shell string -} - -type Group struct { - Name string - Pass string - Gid int - List []string -} - -func parseLine(line string, v ...interface{}) { - if line == "" { - return - } - - parts := strings.Split(line, ":") - for i, p := range parts { - if len(v) <= i { - // if we have more "parts" than we have places to put them, bail for great "tolerance" of naughty configuration files - break - } - - switch e := v[i].(type) { - case *string: - // "root", "adm", "/bin/bash" - *e = p - case *int: - // "0", "4", "1000" - // ignore string to int conversion errors, for great "tolerance" of naughty configuration files - *e, _ = strconv.Atoi(p) - case *[]string: - // "", "root", "root,adm,daemon" - if p != "" { - *e = strings.Split(p, ",") - } else { - *e = []string{} - } - default: - // panic, because this is a programming/logic error, not a runtime one - panic("parseLine expects only pointers! 
argument " + strconv.Itoa(i) + " is not a pointer!") - } - } -} - -func ParsePasswdFile(path string) ([]User, error) { - passwd, err := os.Open(path) - if err != nil { - return nil, err - } - defer passwd.Close() - return ParsePasswd(passwd) -} - -func ParsePasswd(passwd io.Reader) ([]User, error) { - return ParsePasswdFilter(passwd, nil) -} - -func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { - passwd, err := os.Open(path) - if err != nil { - return nil, err - } - defer passwd.Close() - return ParsePasswdFilter(passwd, filter) -} - -func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { - if r == nil { - return nil, fmt.Errorf("nil source for passwd-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []User{} - ) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - text := strings.TrimSpace(s.Text()) - if text == "" { - continue - } - - // see: man 5 passwd - // name:password:UID:GID:GECOS:directory:shell - // Name:Pass:Uid:Gid:Gecos:Home:Shell - // root:x:0:0:root:/root:/bin/bash - // adm:x:3:4:adm:/var/adm:/bin/false - p := User{} - parseLine( - text, - &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell, - ) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - - return out, nil -} - -func ParseGroupFile(path string) ([]Group, error) { - group, err := os.Open(path) - if err != nil { - return nil, err - } - defer group.Close() - return ParseGroup(group) -} - -func ParseGroup(group io.Reader) ([]Group, error) { - return ParseGroupFilter(group, nil) -} - -func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { - group, err := os.Open(path) - if err != nil { - return nil, err - } - defer group.Close() - return ParseGroupFilter(group, filter) -} - -func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { - if r == nil { - return nil, fmt.Errorf("nil source for group-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []Group{} - ) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - text := s.Text() - if text == "" { - continue - } - - // see: man 5 group - // group_name:password:GID:user_list - // Name:Pass:Gid:List - // root:x:0:root - // adm:x:4:root,adm,daemon - p := Group{} - parseLine( - text, - &p.Name, &p.Pass, &p.Gid, &p.List, - ) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - - return out, nil -} - -type ExecUser struct { - Uid, Gid int - Sgids []int - Home string -} - -// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the -// given file paths and uses that data as the arguments to GetExecUser. If the -// files cannot be opened for any reason, the error is ignored and a nil -// io.Reader is passed instead. -func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { - passwd, err := os.Open(passwdPath) - if err != nil { - passwd = nil - } else { - defer passwd.Close() - } - - group, err := os.Open(groupPath) - if err != nil { - group = nil - } else { - defer group.Close() - } - - return GetExecUser(userSpec, defaults, passwd, group) -} - -// GetExecUser parses a user specification string (using the passwd and group -// readers as sources for /etc/passwd and /etc/group data, respectively). In -// the case of blank fields or missing data from the sources, the values in -// defaults is used. 
-// -// GetExecUser will return an error if a user or group literal could not be -// found in any entry in passwd and group respectively. -// -// Examples of valid user specifications are: -// * "" -// * "user" -// * "uid" -// * "user:group" -// * "uid:gid -// * "user:gid" -// * "uid:group" -func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) { - var ( - userArg, groupArg string - name string - ) - - if defaults == nil { - defaults = new(ExecUser) - } - - // Copy over defaults. - user := &ExecUser{ - Uid: defaults.Uid, - Gid: defaults.Gid, - Sgids: defaults.Sgids, - Home: defaults.Home, - } - - // Sgids slice *cannot* be nil. - if user.Sgids == nil { - user.Sgids = []int{} - } - - // allow for userArg to have either "user" syntax, or optionally "user:group" syntax - parseLine(userSpec, &userArg, &groupArg) - - users, err := ParsePasswdFilter(passwd, func(u User) bool { - if userArg == "" { - return u.Uid == user.Uid - } - return u.Name == userArg || strconv.Itoa(u.Uid) == userArg - }) - if err != nil && passwd != nil { - if userArg == "" { - userArg = strconv.Itoa(user.Uid) - } - return nil, fmt.Errorf("Unable to find user %v: %v", userArg, err) - } - - haveUser := users != nil && len(users) > 0 - if haveUser { - // if we found any user entries that matched our filter, let's take the first one as "correct" - name = users[0].Name - user.Uid = users[0].Uid - user.Gid = users[0].Gid - user.Home = users[0].Home - } else if userArg != "" { - // we asked for a user but didn't find them... let's check to see if we wanted a numeric user - user.Uid, err = strconv.Atoi(userArg) - if err != nil { - // not numeric - we have to bail - return nil, fmt.Errorf("Unable to find user %v", userArg) - } - - // Must be inside valid uid range. - if user.Uid < minId || user.Uid > maxId { - return nil, ErrRange - } - - // if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit - } - - if groupArg != "" || name != "" { - groups, err := ParseGroupFilter(group, func(g Group) bool { - // Explicit group format takes precedence. - if groupArg != "" { - return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg - } - - // Check if user is a member. - for _, u := range g.List { - if u == name { - return true - } - } - - return false - }) - if err != nil && group != nil { - return nil, fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err) - } - - haveGroup := groups != nil && len(groups) > 0 - if groupArg != "" { - if haveGroup { - // if we found any group entries that matched our filter, let's take the first one as "correct" - user.Gid = groups[0].Gid - } else { - // we asked for a group but didn't find id... let's check to see if we wanted a numeric group - user.Gid, err = strconv.Atoi(groupArg) - if err != nil { - // not numeric - we have to bail - return nil, fmt.Errorf("Unable to find group %v", groupArg) - } - - // Ensure gid is inside gid range. - if user.Gid < minId || user.Gid > maxId { - return nil, ErrRange - } - - // if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit - } - } else if haveGroup { - // If implicit group format, fill supplementary gids. - user.Sgids = make([]int, len(groups)) - for i, group := range groups { - user.Sgids[i] = group.Gid - } - } - } - - return user, nil -} - -// GetAdditionalGroups looks up a list of groups by name or group id -// against the given /etc/group formatted data. If a group name cannot -// be found, an error will be returned. 
If a group id cannot be found, -// or the given group data is nil, the id will be returned as-is -// provided it is in the legal range. -func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) { - var groups = []Group{} - if group != nil { - var err error - groups, err = ParseGroupFilter(group, func(g Group) bool { - for _, ag := range additionalGroups { - if g.Name == ag || strconv.Itoa(g.Gid) == ag { - return true - } - } - return false - }) - if err != nil { - return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err) - } - } - - gidMap := make(map[int]struct{}) - for _, ag := range additionalGroups { - var found bool - for _, g := range groups { - // if we found a matched group either by name or gid, take the - // first matched as correct - if g.Name == ag || strconv.Itoa(g.Gid) == ag { - if _, ok := gidMap[g.Gid]; !ok { - gidMap[g.Gid] = struct{}{} - found = true - break - } - } - } - // we asked for a group but didn't find it. let's check to see - // if we wanted a numeric group - if !found { - gid, err := strconv.Atoi(ag) - if err != nil { - return nil, fmt.Errorf("Unable to find group %s", ag) - } - // Ensure gid is inside gid range. - if gid < minId || gid > maxId { - return nil, ErrRange - } - gidMap[gid] = struct{}{} - } - } - gids := []int{} - for gid := range gidMap { - gids = append(gids, gid) - } - return gids, nil -} - -// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups -// that opens the groupPath given and gives it as an argument to -// GetAdditionalGroups. -func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { - group, err := os.Open(groupPath) - if err == nil { - defer group.Close() - } - return GetAdditionalGroups(additionalGroups, group) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go deleted file mode 100644 index 9d21da286..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go +++ /dev/null @@ -1,595 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "os" - "time" -) - -// APIImages represent an image returned in the ListImages call. 
-type APIImages struct { - ID string `json:"Id" yaml:"Id"` - RepoTags []string `json:"RepoTags,omitempty" yaml:"RepoTags,omitempty"` - Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"` - Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"` - VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty"` - ParentID string `json:"ParentId,omitempty" yaml:"ParentId,omitempty"` - RepoDigests []string `json:"RepoDigests,omitempty" yaml:"RepoDigests,omitempty"` - Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"` -} - -// Image is the type representing a Docker image and its various properties -type Image struct { - ID string `json:"Id" yaml:"Id"` - Parent string `json:"Parent,omitempty" yaml:"Parent,omitempty"` - Comment string `json:"Comment,omitempty" yaml:"Comment,omitempty"` - Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty"` - Container string `json:"Container,omitempty" yaml:"Container,omitempty"` - ContainerConfig Config `json:"ContainerConfig,omitempty" yaml:"ContainerConfig,omitempty"` - DockerVersion string `json:"DockerVersion,omitempty" yaml:"DockerVersion,omitempty"` - Author string `json:"Author,omitempty" yaml:"Author,omitempty"` - Config *Config `json:"Config,omitempty" yaml:"Config,omitempty"` - Architecture string `json:"Architecture,omitempty" yaml:"Architecture,omitempty"` - Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"` - VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty"` -} - -// ImagePre012 serves the same purpose as the Image type except that it is for -// earlier versions of the Docker API (pre-012 to be specific) -type ImagePre012 struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - Container string `json:"container,omitempty"` - ContainerConfig Config `json:"container_config,omitempty"` - DockerVersion string `json:"docker_version,omitempty"` - Author string `json:"author,omitempty"` - Config *Config `json:"config,omitempty"` - Architecture string `json:"architecture,omitempty"` - Size int64 `json:"size,omitempty"` -} - -var ( - // ErrNoSuchImage is the error returned when the image does not exist. - ErrNoSuchImage = errors.New("no such image") - - // ErrMissingRepo is the error returned when the remote repository is - // missing. - ErrMissingRepo = errors.New("missing remote repository e.g. 'github.com/user/repo'") - - // ErrMissingOutputStream is the error returned when no output stream - // is provided to some calls, like BuildImage. - ErrMissingOutputStream = errors.New("missing output stream") - - // ErrMultipleContexts is the error returned when both a ContextDir and - // InputStream are provided in BuildImageOptions - ErrMultipleContexts = errors.New("image build may not be provided BOTH context dir and input stream") - - // ErrMustSpecifyNames is the error returned when the Names field on - // ExportImagesOptions is nil or empty - ErrMustSpecifyNames = errors.New("must specify at least one name to export") -) - -// ListImagesOptions specifies parameters to the ListImages function. -// -// See https://goo.gl/xBe1u3 for more details. -type ListImagesOptions struct { - All bool - Filters map[string][]string - Digests bool -} - -// ListImages returns the list of available images in the server. -// -// See https://goo.gl/xBe1u3 for more details. -func (c *Client) ListImages(opts ListImagesOptions) ([]APIImages, error) { - path := "/images/json?"
+ queryString(opts) - resp, err := c.do("GET", path, doOptions{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var images []APIImages - if err := json.NewDecoder(resp.Body).Decode(&images); err != nil { - return nil, err - } - return images, nil -} - -// ImageHistory represent a layer in an image's history returned by the -// ImageHistory call. -type ImageHistory struct { - ID string `json:"Id" yaml:"Id"` - Tags []string `json:"Tags,omitempty" yaml:"Tags,omitempty"` - Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"` - CreatedBy string `json:"CreatedBy,omitempty" yaml:"CreatedBy,omitempty"` - Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"` -} - -// ImageHistory returns the history of the image by its name or ID. -// -// See https://goo.gl/8bnTId for more details. -func (c *Client) ImageHistory(name string) ([]ImageHistory, error) { - resp, err := c.do("GET", "/images/"+name+"/history", doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, ErrNoSuchImage - } - return nil, err - } - defer resp.Body.Close() - var history []ImageHistory - if err := json.NewDecoder(resp.Body).Decode(&history); err != nil { - return nil, err - } - return history, nil -} - -// RemoveImage removes an image by its name or ID. -// -// See https://goo.gl/V3ZWnK for more details. -func (c *Client) RemoveImage(name string) error { - resp, err := c.do("DELETE", "/images/"+name, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return ErrNoSuchImage - } - return err - } - resp.Body.Close() - return nil -} - -// RemoveImageOptions present the set of options available for removing an image -// from a registry. -// -// See https://goo.gl/V3ZWnK for more details. -type RemoveImageOptions struct { - Force bool `qs:"force"` - NoPrune bool `qs:"noprune"` -} - -// RemoveImageExtended removes an image by its name or ID. -// Extra params can be passed, see RemoveImageOptions -// -// See https://goo.gl/V3ZWnK for more details. -func (c *Client) RemoveImageExtended(name string, opts RemoveImageOptions) error { - uri := fmt.Sprintf("/images/%s?%s", name, queryString(&opts)) - resp, err := c.do("DELETE", uri, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return ErrNoSuchImage - } - return err - } - resp.Body.Close() - return nil -} - -// InspectImage returns an image by its name or ID. -// -// See https://goo.gl/jHPcg6 for more details. 
-func (c *Client) InspectImage(name string) (*Image, error) { - resp, err := c.do("GET", "/images/"+name+"/json", doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, ErrNoSuchImage - } - return nil, err - } - defer resp.Body.Close() - - var image Image - - // if the caller elected to skip checking the server's version, assume it's the latest - if c.SkipServerVersionCheck || c.expectedAPIVersion.GreaterThanOrEqualTo(apiVersion112) { - if err := json.NewDecoder(resp.Body).Decode(&image); err != nil { - return nil, err - } - } else { - var imagePre012 ImagePre012 - if err := json.NewDecoder(resp.Body).Decode(&imagePre012); err != nil { - return nil, err - } - - image.ID = imagePre012.ID - image.Parent = imagePre012.Parent - image.Comment = imagePre012.Comment - image.Created = imagePre012.Created - image.Container = imagePre012.Container - image.ContainerConfig = imagePre012.ContainerConfig - image.DockerVersion = imagePre012.DockerVersion - image.Author = imagePre012.Author - image.Config = imagePre012.Config - image.Architecture = imagePre012.Architecture - image.Size = imagePre012.Size - } - - return &image, nil -} - -// PushImageOptions represents options to use in the PushImage method. -// -// See https://goo.gl/zPtZaT for more details. -type PushImageOptions struct { - // Name of the image - Name string - - // Tag of the image - Tag string - - // Registry server to push the image - Registry string - - OutputStream io.Writer `qs:"-"` - RawJSONStream bool `qs:"-"` -} - -// PushImage pushes an image to a remote registry, logging progress to w. -// -// An empty instance of AuthConfiguration may be used for unauthenticated -// pushes. -// -// See https://goo.gl/zPtZaT for more details. -func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error { - if opts.Name == "" { - return ErrNoSuchImage - } - headers, err := headersWithAuth(auth) - if err != nil { - return err - } - name := opts.Name - opts.Name = "" - path := "/images/" + name + "/push?" + queryString(&opts) - return c.stream("POST", path, streamOptions{ - setRawTerminal: true, - rawJSONStream: opts.RawJSONStream, - headers: headers, - stdout: opts.OutputStream, - }) -} - -// PullImageOptions present the set of options available for pulling an image -// from a registry. -// -// See https://goo.gl/iJkZjD for more details. -type PullImageOptions struct { - Repository string `qs:"fromImage"` - Registry string - Tag string - OutputStream io.Writer `qs:"-"` - RawJSONStream bool `qs:"-"` -} - -// PullImage pulls an image from a remote registry, logging progress to -// opts.OutputStream. -// -// See https://goo.gl/iJkZjD for more details. -func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error { - if opts.Repository == "" { - return ErrNoSuchImage - } - - headers, err := headersWithAuth(auth) - if err != nil { - return err - } - return c.createImage(queryString(&opts), headers, nil, opts.OutputStream, opts.RawJSONStream) -} - -func (c *Client) createImage(qs string, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool) error { - path := "/images/create?" + qs - return c.stream("POST", path, streamOptions{ - setRawTerminal: true, - rawJSONStream: rawJSONStream, - headers: headers, - in: in, - stdout: w, - }) -} - -// LoadImageOptions represents the options for LoadImage Docker API Call -// -// See https://goo.gl/JyClMX for more details. 
-type LoadImageOptions struct { - InputStream io.Reader -} - -// LoadImage imports a tarball Docker image -// -// See https://goo.gl/JyClMX for more details. -func (c *Client) LoadImage(opts LoadImageOptions) error { - return c.stream("POST", "/images/load", streamOptions{ - setRawTerminal: true, - in: opts.InputStream, - }) -} - -// ExportImageOptions represents the options for ExportImage Docker API call. -// -// See https://goo.gl/le7vK8 for more details. -type ExportImageOptions struct { - Name string - OutputStream io.Writer -} - -// ExportImage exports an image (as a tar file) into the stream. -// -// See https://goo.gl/le7vK8 for more details. -func (c *Client) ExportImage(opts ExportImageOptions) error { - return c.stream("GET", fmt.Sprintf("/images/%s/get", opts.Name), streamOptions{ - setRawTerminal: true, - stdout: opts.OutputStream, - }) -} - -// ExportImagesOptions represents the options for ExportImages Docker API call -// -// See https://goo.gl/huC7HA for more details. -type ExportImagesOptions struct { - Names []string - OutputStream io.Writer `qs:"-"` -} - -// ExportImages exports one or more images (as a tar file) into the stream -// -// See https://goo.gl/huC7HA for more details. -func (c *Client) ExportImages(opts ExportImagesOptions) error { - if opts.Names == nil || len(opts.Names) == 0 { - return ErrMustSpecifyNames - } - return c.stream("GET", "/images/get?"+queryString(&opts), streamOptions{ - setRawTerminal: true, - stdout: opts.OutputStream, - }) -} - -// ImportImageOptions presents the set of options available for importing -// an image from a source file or from stdin. -// -// See https://goo.gl/iJkZjD for more details. -type ImportImageOptions struct { - Repository string `qs:"repo"` - Source string `qs:"fromSrc"` - Tag string `qs:"tag"` - - InputStream io.Reader `qs:"-"` - OutputStream io.Writer `qs:"-"` - RawJSONStream bool `qs:"-"` -} - -// ImportImage imports an image from a URL, a file, or stdin -// -// See https://goo.gl/iJkZjD for more details. -func (c *Client) ImportImage(opts ImportImageOptions) error { - if opts.Repository == "" { - return ErrNoSuchImage - } - if opts.Source != "-" { - opts.InputStream = nil - } - if opts.Source != "-" && !isURL(opts.Source) { - f, err := os.Open(opts.Source) - if err != nil { - return err - } - opts.InputStream = f - opts.Source = "-" - } - return c.createImage(queryString(&opts), nil, opts.InputStream, opts.OutputStream, opts.RawJSONStream) -} - -// BuildImageOptions presents the set of options available for building an -// image from a tarfile with a Dockerfile in it. -// -// For more details about the Docker building process, see -// http://goo.gl/tlPXPu. -type BuildImageOptions struct { - Name string `qs:"t"` - Dockerfile string `qs:"dockerfile"` - NoCache bool `qs:"nocache"` - SuppressOutput bool `qs:"q"` - Pull bool `qs:"pull"` - RmTmpContainer bool `qs:"rm"` - ForceRmTmpContainer bool `qs:"forcerm"` - Memory int64 `qs:"memory"` - Memswap int64 `qs:"memswap"` - CPUShares int64 `qs:"cpushares"` - CPUSetCPUs string `qs:"cpusetcpus"` - InputStream io.Reader `qs:"-"` - OutputStream io.Writer `qs:"-"` - RawJSONStream bool `qs:"-"` - Remote string `qs:"remote"` - Auth AuthConfiguration `qs:"-"` // for older docker X-Registry-Auth header - AuthConfigs AuthConfigurations `qs:"-"` // for newer docker X-Registry-Config header - ContextDir string `qs:"-"` - Ulimits []ULimit `qs:"-"` -} - -// BuildImage builds an image from a tarball's URL or a Dockerfile in the input -// stream.
-// -// See https://goo.gl/xySxCe for more details. -func (c *Client) BuildImage(opts BuildImageOptions) error { - if opts.OutputStream == nil { - return ErrMissingOutputStream - } - headers, err := headersWithAuth(opts.Auth, c.versionedAuthConfigs(opts.AuthConfigs)) - if err != nil { - return err - } - - if opts.Remote != "" && opts.Name == "" { - opts.Name = opts.Remote - } - if opts.InputStream != nil || opts.ContextDir != "" { - headers["Content-Type"] = "application/tar" - } else if opts.Remote == "" { - return ErrMissingRepo - } - if opts.ContextDir != "" { - if opts.InputStream != nil { - return ErrMultipleContexts - } - var err error - if opts.InputStream, err = createTarStream(opts.ContextDir, opts.Dockerfile); err != nil { - return err - } - } - - qs := queryString(&opts) - if len(opts.Ulimits) > 0 { - if b, err := json.Marshal(opts.Ulimits); err == nil { - item := url.Values(map[string][]string{}) - item.Add("ulimits", string(b)) - qs = fmt.Sprintf("%s&%s", qs, item.Encode()) - } - } - - return c.stream("POST", fmt.Sprintf("/build?%s", qs), streamOptions{ - setRawTerminal: true, - rawJSONStream: opts.RawJSONStream, - headers: headers, - in: opts.InputStream, - stdout: opts.OutputStream, - }) -} - -func (c *Client) versionedAuthConfigs(authConfigs AuthConfigurations) interface{} { - if c.serverAPIVersion == nil { - c.checkAPIVersion() - } - if c.serverAPIVersion != nil && c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion119) { - return AuthConfigurations119(authConfigs.Configs) - } - return authConfigs -} - -// TagImageOptions present the set of options to tag an image. -// -// See https://goo.gl/98ZzkU for more details. -type TagImageOptions struct { - Repo string - Tag string - Force bool -} - -// TagImage adds a tag to the image identified by the given name. -// -// See https://goo.gl/98ZzkU for more details. -func (c *Client) TagImage(name string, opts TagImageOptions) error { - if name == "" { - return ErrNoSuchImage - } - resp, err := c.do("POST", fmt.Sprintf("/images/"+name+"/tag?%s", - queryString(&opts)), doOptions{}) - - if err != nil { - return err - } - - defer resp.Body.Close() - - if resp.StatusCode == http.StatusNotFound { - return ErrNoSuchImage - } - - return err -} - -func isURL(u string) bool { - p, err := url.Parse(u) - if err != nil { - return false - } - return p.Scheme == "http" || p.Scheme == "https" -} - -func headersWithAuth(auths ...interface{}) (map[string]string, error) { - var headers = make(map[string]string) - - for _, auth := range auths { - switch auth.(type) { - case AuthConfiguration: - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(auth); err != nil { - return nil, err - } - headers["X-Registry-Auth"] = base64.URLEncoding.EncodeToString(buf.Bytes()) - case AuthConfigurations, AuthConfigurations119: - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(auth); err != nil { - return nil, err - } - headers["X-Registry-Config"] = base64.URLEncoding.EncodeToString(buf.Bytes()) - } - } - - return headers, nil -} - -// APIImageSearch reflect the result of a search on the Docker Hub. -// -// See https://goo.gl/AYjyrF for more details. 
-type APIImageSearch struct { - Description string `json:"description,omitempty" yaml:"description,omitempty"` - IsOfficial bool `json:"is_official,omitempty" yaml:"is_official,omitempty"` - IsAutomated bool `json:"is_automated,omitempty" yaml:"is_automated,omitempty"` - Name string `json:"name,omitempty" yaml:"name,omitempty"` - StarCount int `json:"star_count,omitempty" yaml:"star_count,omitempty"` -} - -// SearchImages searches the Docker Hub with the given term. -// -// See https://goo.gl/AYjyrF for more details. -func (c *Client) SearchImages(term string) ([]APIImageSearch, error) { - resp, err := c.do("GET", "/images/search?term="+term, doOptions{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var searchResult []APIImageSearch - if err := json.NewDecoder(resp.Body).Decode(&searchResult); err != nil { - return nil, err - } - return searchResult, nil -} - -// SearchImagesEx searches the Docker Hub with the given term and authentication. -// -// See https://goo.gl/AYjyrF for more details. -func (c *Client) SearchImagesEx(term string, auth AuthConfiguration) ([]APIImageSearch, error) { - headers, err := headersWithAuth(auth) - if err != nil { - return nil, err - } - - resp, err := c.do("GET", "/images/search?term="+term, doOptions{ - headers: headers, - }) - if err != nil { - return nil, err - } - - defer resp.Body.Close() - - var searchResult []APIImageSearch - if err := json.NewDecoder(resp.Body).Decode(&searchResult); err != nil { - return nil, err - } - - return searchResult, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go deleted file mode 100644 index 34c96531a..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import "strings" - -// Version returns version information about the Docker server. -// -// See https://goo.gl/ND9R8L for more details. -func (c *Client) Version() (*Env, error) { - resp, err := c.do("GET", "/version", doOptions{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var env Env - if err := env.Decode(resp.Body); err != nil { - return nil, err - } - return &env, nil -} - -// Info returns system-wide information about the Docker server. -// -// See https://goo.gl/ElTHi2 for more details. -func (c *Client) Info() (*Env, error) { - resp, err := c.do("GET", "/info", doOptions{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var info Env - if err := info.Decode(resp.Body); err != nil { - return nil, err - } - return &info, nil -} - -// ParseRepositoryTag gets the name of the repository and returns it split -in two parts: the repository and the tag.
-// -// Some examples: -// -// localhost.localdomain:5000/samalba/hipache:latest -> localhost.localdomain:5000/samalba/hipache, latest -// localhost.localdomain:5000/samalba/hipache -> localhost.localdomain:5000/samalba/hipache, "" -func ParseRepositoryTag(repoTag string) (repository string, tag string) { - n := strings.LastIndex(repoTag, ":") - if n < 0 { - return repoTag, "" - } - if tag := repoTag[n+1:]; !strings.Contains(tag, "/") { - return repoTag[:n], tag - } - return repoTag, "" -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network.go deleted file mode 100644 index 8fa7091e4..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" -) - -// ErrNetworkAlreadyExists is the error returned by CreateNetwork when the -// network already exists. -var ErrNetworkAlreadyExists = errors.New("network already exists") - -// Network represents a network. -// -// See https://goo.gl/FDkCdQ for more details. -type Network struct { - Name string `json:"name"` - ID string `json:"id"` - Type string `json:"type"` - Endpoints []*Endpoint `json:"endpoints"` -} - -// Endpoint represents an endpoint. -// -// See https://goo.gl/FDkCdQ for more details. -type Endpoint struct { - Name string `json:"name"` - ID string `json:"id"` - Network string `json:"network"` -} - -// ListNetworks returns all networks. -// -// See https://goo.gl/4hCNtZ for more details. -func (c *Client) ListNetworks() ([]Network, error) { - resp, err := c.do("GET", "/networks", doOptions{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var networks []Network - if err := json.NewDecoder(resp.Body).Decode(&networks); err != nil { - return nil, err - } - return networks, nil -} - -// NetworkInfo returns information about a network by its ID. -// -// See https://goo.gl/4hCNtZ for more details. -func (c *Client) NetworkInfo(id string) (*Network, error) { - path := "/networks/" + id - resp, err := c.do("GET", path, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, &NoSuchNetwork{ID: id} - } - return nil, err - } - defer resp.Body.Close() - var network Network - if err := json.NewDecoder(resp.Body).Decode(&network); err != nil { - return nil, err - } - return &network, nil -} - -// CreateNetworkOptions specify parameters to the CreateNetwork function and -// (for now) is the expected body of the "create network" http request message -// -// See https://goo.gl/FDkCdQ for more details. -type CreateNetworkOptions struct { - Name string `json:"name"` - NetworkType string `json:"network_type"` - Options map[string]interface{} `json:"options"` -} - -// CreateNetwork creates a new network, returning the network instance, -// or an error in case of failure. -// -// See https://goo.gl/FDkCdQ for more details. 
-func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) { - resp, err := c.do( - "POST", - "/networks", - doOptions{ - data: opts, - }, - ) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusConflict { - return nil, ErrNetworkAlreadyExists - } - return nil, err - } - defer resp.Body.Close() - - type createNetworkResponse struct { - ID string - } - var ( - network Network - cnr createNetworkResponse - ) - if err := json.NewDecoder(resp.Body).Decode(&cnr); err != nil { - return nil, err - } - - network.Name = opts.Name - network.ID = cnr.ID - network.Type = opts.NetworkType - - return &network, nil -} - -// RemoveNetwork removes a network, returning an error in case of failure. -// -// See https://goo.gl/FDkCdQ for more details. -func (c *Client) RemoveNetwork(id string) error { - resp, err := c.do("DELETE", "/networks/"+id, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return &NoSuchNetwork{ID: id} - } - return err - } - resp.Body.Close() - return nil -} - -// NoSuchNetwork is the error returned when a given network does not exist. -type NoSuchNetwork struct { - ID string -} - -func (err *NoSuchNetwork) Error() string { - return fmt.Sprintf("No such network: %s", err.ID) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/signal.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/signal.go deleted file mode 100644 index 16aa00388..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/signal.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -// Signal represents a signal that can be sent to the container on a -// KillContainer call. -type Signal int - -// These values represent all signals available on Linux, where containers will -// be running. -const ( - SIGABRT = Signal(0x6) - SIGALRM = Signal(0xe) - SIGBUS = Signal(0x7) - SIGCHLD = Signal(0x11) - SIGCLD = Signal(0x11) - SIGCONT = Signal(0x12) - SIGFPE = Signal(0x8) - SIGHUP = Signal(0x1) - SIGILL = Signal(0x4) - SIGINT = Signal(0x2) - SIGIO = Signal(0x1d) - SIGIOT = Signal(0x6) - SIGKILL = Signal(0x9) - SIGPIPE = Signal(0xd) - SIGPOLL = Signal(0x1d) - SIGPROF = Signal(0x1b) - SIGPWR = Signal(0x1e) - SIGQUIT = Signal(0x3) - SIGSEGV = Signal(0xb) - SIGSTKFLT = Signal(0x10) - SIGSTOP = Signal(0x13) - SIGSYS = Signal(0x1f) - SIGTERM = Signal(0xf) - SIGTRAP = Signal(0x5) - SIGTSTP = Signal(0x14) - SIGTTIN = Signal(0x15) - SIGTTOU = Signal(0x16) - SIGUNUSED = Signal(0x1f) - SIGURG = Signal(0x17) - SIGUSR1 = Signal(0xa) - SIGUSR2 = Signal(0xc) - SIGVTALRM = Signal(0x1a) - SIGWINCH = Signal(0x1c) - SIGXCPU = Signal(0x18) - SIGXFSZ = Signal(0x19) -) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.go deleted file mode 100644 index 48042cbda..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file.
- -package docker - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils" -) - -func createTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) { - excludes, err := parseDockerignore(srcPath) - if err != nil { - return nil, err - } - - includes := []string{"."} - - // If .dockerignore mentions .dockerignore or the Dockerfile - // then make sure we send both files over to the daemon - // because Dockerfile is, obviously, needed no matter what, and - // .dockerignore is needed to know if either one needs to be - // removed. The daemon will remove them for us, if needed, after it - // parses the Dockerfile. - // - // https://github.com/docker/docker/issues/8330 - // - forceIncludeFiles := []string{".dockerignore", dockerfilePath} - - for _, includeFile := range forceIncludeFiles { - if includeFile == "" { - continue - } - keepThem, err := fileutils.Matches(includeFile, excludes) - if err != nil { - return nil, fmt.Errorf("cannot match .dockerfile: '%s', error: %s", includeFile, err) - } - if keepThem { - includes = append(includes, includeFile) - } - } - - if err := validateContextDirectory(srcPath, excludes); err != nil { - return nil, err - } - tarOpts := &archive.TarOptions{ - ExcludePatterns: excludes, - IncludeFiles: includes, - Compression: archive.Uncompressed, - NoLchown: true, - } - return archive.TarWithOptions(srcPath, tarOpts) -} - -// validateContextDirectory checks if all the contents of the directory -// can be read and returns an error if some files can't be read. -// Symlinks which point to non-existing files don't trigger an error -func validateContextDirectory(srcPath string, excludes []string) error { - return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error { - // skip this directory/file if it's not in the path; it won't get added to the context - if relFilePath, err := filepath.Rel(srcPath, filePath); err != nil { - return err - } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { - return err - } else if skip { - if f.IsDir() { - return filepath.SkipDir - } - return nil - } - - if err != nil { - if os.IsPermission(err) { - return fmt.Errorf("can't stat '%s'", filePath) - } - if os.IsNotExist(err) { - return nil - } - return err - } - - // skip checking if symlinks point to non-existing files, such symlinks can be useful - // also skip named pipes, because they hang on open - if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { - return nil - } - - if !f.IsDir() { - currentFile, err := os.Open(filePath) - if err != nil && os.IsPermission(err) { - return fmt.Errorf("no permission to read from '%s'", filePath) - } - currentFile.Close() - } - return nil - }) -} - -func parseDockerignore(root string) ([]string, error) { - var excludes []string - ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore")) - if err != nil && !os.IsNotExist(err) { - return excludes, fmt.Errorf("error reading .dockerignore: '%s'", err) - } - excludes = strings.Split(string(ignore), "\n") - - return excludes, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/.dockerignore b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/.dockerignore deleted file mode 100644 index 027e8c20e..000000000 ---
a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/.dockerignore +++ /dev/null @@ -1,3 +0,0 @@ -container.tar -dockerfile.tar -foofile diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile deleted file mode 100644 index 0948dcfa8..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -# this file describes how to build tsuru python image -# to run it: -# 1- install docker -# 2- run: $ docker build -t tsuru/python https://raw.github.com/tsuru/basebuilder/master/python/Dockerfile - -from base:ubuntu-quantal -run apt-get install wget -y --force-yes -run wget http://github.com/tsuru/basebuilder/tarball/master -O basebuilder.tar.gz --no-check-certificate -run mkdir /var/lib/tsuru -run tar -xvf basebuilder.tar.gz -C /var/lib/tsuru --strip 1 -run cp /var/lib/tsuru/python/deploy /var/lib/tsuru -run cp /var/lib/tsuru/base/restart /var/lib/tsuru -run cp /var/lib/tsuru/base/start /var/lib/tsuru -run /var/lib/tsuru/base/install -run /var/lib/tsuru/base/setup diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/barfile b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/barfile deleted file mode 100644 index e69de29bb..000000000 diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/ca.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/ca.pem deleted file mode 100644 index 8e38bba13..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/ca.pem +++ /dev/null @@ -1,18 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIC1TCCAb+gAwIBAgIQJ9MsNxrUxumNbAytGi3GEDALBgkqhkiG9w0BAQswFjEU -MBIGA1UEChMLQm9vdDJEb2NrZXIwHhcNMTQxMDE2MjAyMTM4WhcNMTcwOTMwMjAy -MTM4WjAWMRQwEgYDVQQKEwtCb290MkRvY2tlcjCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBALpFCSARjG+5yXoqr7UMzuE0df7RRZfeRZI06lJ02ZqV4Iii -rgL7ML9yPxX50NbLnjiilSDTUhnyocYFItokzUzz8qpX/nlYhuN2Iqwh4d0aWS8z -f5y248F+H1z+HY2W8NPl/6DVlVwYaNW1/k+RPMlHS0INLR6j+3Ievew7RNE0NnM2 -znELW6NetekDt3GUcz0Z95vDUDfdPnIk1eIFMmYvLxZh23xOca4Q37a3S8F3d+dN -+OOpwjdgY9Qme0NQUaXpgp58jWuQfB8q7mZrdnLlLqRa8gx1HeDSotX7UmWtWPkb -vd9EdlKLYw5PVpxMV1rkwf2t4TdgD5NfkpXlXkkCAwEAAaMjMCEwDgYDVR0PAQH/ -BAQDAgCkMA8GA1UdEwEB/wQFMAMBAf8wCwYJKoZIhvcNAQELA4IBAQBxYjHVSKqE -MJw7CW0GddesULtXXVWGJuZdWJLQlPvPMfIfjIvlcZyS4cdVNiQ3sREFIZz8TpII -CT0/Pg3sgv/FcOQe1CN0xZYZcyiAZHK1z0fJQq2qVpdv7+tJcjI2vvU6NI24iQCo -W1wz25trJz9QbdB2MRLMjyz7TSWuafztIvcfEzaIdQ0Whqund/cSuPGQx5IwF83F -rvlkOyJSH2+VIEBTCIuykJeL0DLTt8cePBQR5L1ISXb4RUMK9ZtqRscBRv8sn7o2 -ixG3wtL0gYF4xLtsQWVxI3iFVrU3WzOH/3c5shVRkWBd+AQRSwCJI4mKH7penJCF -i3/zzlkvOnjV ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/cert.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/cert.pem deleted file mode 100644 index 5e7244b24..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/cert.pem +++ /dev/null @@ -1,18 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIC6DCCAdKgAwIBAgIRANO6ymxQAjp66KmEka1G6b0wCwYJKoZIhvcNAQELMBYx -FDASBgNVBAoTC0Jvb3QyRG9ja2VyMB4XDTE0MTAxNjIwMjE1MloXDTE3MDkzMDIw -MjE1MlowFjEUMBIGA1UEChMLQm9vdDJEb2NrZXIwggEiMA0GCSqGSIb3DQEBAQUA -A4IBDwAwggEKAoIBAQDGA1mAhSOpZspD1dpZ7qVEQrIJw4Xo8252jHaORnEdDiFm -b6brEmr6jw8t4P3IGxbqBc/TqRV+SSXxwYEVvfpeQKH+SmqStoMNtD3Ura161az4 -V0BcxMtSlsUGpoz+//QCAq8qiaxMwgiyc5253mkQm88anj2cNt7xbewiu/KFWuf7 
-BVpNK1+ltpJmlukfcj/G+I1bw7j1KxBjDrFqe5cyDuuZcDL2tmUXP/ZWDyXwSv+H -AOckqn44z6aXlBkVvOXDBZJqY76d/vWVDNCuZeXRnqlhP3t1kH4V0RQXo+JD2tgt -JgdU0unzyoFOSWNUBPm73tqmjUGGAmGHBmeegJr/AgMBAAGjNTAzMA4GA1UdDwEB -/wQEAwIAgDATBgNVHSUEDDAKBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMAsGCSqG -SIb3DQEBCwOCAQEABVTWl5SmBP+j5He5bQsgnIXjviSKqe40/10V4LJAOmilycRF -zLrzM+YMwfjg6PLIs8CldAMWHw9y9ktZY4MxkgCktaiaN/QmMTMwFWEcN4wy5IpM -U5l93eAg7xsnY430h3QBBADujX4wdF3fs8rSL8zAAQFL0ihurwU124K3yXKsrwpb -CiVUGfIN4sPwjy8Ws9oxHFDC9/P8lgjHZ1nBIf8KSHnMzlxDGj7isQfhtH+7mcCL -cM1qO2NirS2v7uaEPPY+MJstAz+W7EJCW9dfMSmHna2SDC37Xkin7uEY9z+qaKFL -8d/XxOB/L8Ucy8VZhdsv0dsBq5KfJntITM0ksQ== ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/container.tar b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/container.tar deleted file mode 100644 index e4b066e3b..000000000 Binary files a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/container.tar and /dev/null differ diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar deleted file mode 100644 index 32c9ce647..000000000 Binary files a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar and /dev/null differ diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/foofile b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/foofile deleted file mode 100644 index e69de29bb..000000000 diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/key.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/key.pem deleted file mode 100644 index a9346bcf4..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAxgNZgIUjqWbKQ9XaWe6lREKyCcOF6PNudox2jkZxHQ4hZm+m -6xJq+o8PLeD9yBsW6gXP06kVfkkl8cGBFb36XkCh/kpqkraDDbQ91K2tetWs+FdA -XMTLUpbFBqaM/v/0AgKvKomsTMIIsnOdud5pEJvPGp49nDbe8W3sIrvyhVrn+wVa -TStfpbaSZpbpH3I/xviNW8O49SsQYw6xanuXMg7rmXAy9rZlFz/2Vg8l8Er/hwDn -JKp+OM+ml5QZFbzlwwWSamO+nf71lQzQrmXl0Z6pYT97dZB+FdEUF6PiQ9rYLSYH -VNLp88qBTkljVAT5u97apo1BhgJhhwZnnoCa/wIDAQABAoIBAQCaGy9EC9pmU95l -DwGh7k5nIrUnTilg1FwLHWSDdCVCZKXv8ENrPelOWZqJrUo1u4eI2L8XTsewgkNq -tJu/DRzWz9yDaO0qg6rZNobMh+K076lvmZA44twOydJLS8H+D7ua+PXU2FLlZjmY -kMyXRJZmW6zCXZc7haTbJx6ZJccoquk/DkS4FcFurJP177u1YrWS9TTw9kensUtU -jQ63uf56UTN1i+0+Rxl7OW1TZlqwlri5I4njg5249+FxwwHzIq8+l7zD7K9pl8c/ -nG1HuulvU2bVlDlRdyslMPAH34vw9Sku1BD8furrJLr1na5lRSLKJODEaIPEsLwv -CdEUwP9JAoGBAO76ZW80RyNB2fA+wbTq70Sr8CwrXxYemXrez5LKDC7SsohKFCPE -IedpO/n+nmymiiJvMm874EExoG6BVrbkWkeb+2vinEfOQNlDMsDx7WLjPekP3t6i -rXHO3CjFooVFq2z3mZa/Nc5NZqu8fNWNCKJxZDJphdoj6sORNJIUvZVjAoGBANQd -++J+ITcu3/+A6JrGcgLunBFQYPqkiItk0J4QKYKuX5ik9rWcQDN8TTtfW2mDuiQ4 -NrCwuVPq1V1kB16JzH017SsYLo9g8I20YjnBZge9pKTeUaLVTb3C50LW8FBylop0 -Bnm597dNbtSjphjoTMg0XyC19o3Esf2YeWG0QNS1AoGAWWDfFRNJU99qIldmXULM -0DM6NVrXSk+ReYnhunXEzrJQwXZrR+EwCPurydk36Uz0NuK9yypquhdUeF/5TZfk -SAoHo5byekyipl9imRUigqyY2BTudvgCxKDoaHtaSFwBPFTyZZYICquaLbrmOXxw -8UhVgCFFRYvPXuts7QHC0h8CgYBWEvy9gfU0kV7wLX02IUTuj6jhFb7ktpN6DSTi -nyhZES1VoctDEu6ydcRZTW6ouH12aSE4Pd5WgTqntQmQgVZrkNB25k8ue2Xh+srJ -KQOgLIJ9LIHwE6KCWG7DnrjRzE3uTPq7to0g4tkQjH/AJ7PQof/gJDayfJjFkXPg -A+cy6QKBgEPbKpiqscm03gT2QanBut5pg4dqPOxp0SlErA3kSFNTRK3oYBQPC+LH -qA5nD5brdkeNBB58Rll8Zpzxiff50bcvLP/7/Sb3NjaXFTEY0gVbdRof3n6N0YP3 
-Hu5XDNJ9RNkNzE5RIG1g86KE+aKlcrKMaigqAiuIy2PSnjkQeGk8 ------END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/server.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/server.pem deleted file mode 100644 index 89cc445e1..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/server.pem +++ /dev/null @@ -1,18 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIC/DCCAeagAwIBAgIQMUILcXtvmSOK63zEBo0VXzALBgkqhkiG9w0BAQswFjEU -MBIGA1UEChMLQm9vdDJEb2NrZXIwHhcNMTQxMDE2MjAyMTQ2WhcNMTcwOTMwMjAy -MTQ2WjAWMRQwEgYDVQQKEwtCb290MkRvY2tlcjCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBANxUOUhNnqFnrTlLsBYzfFRZWQo268l+4K4lOJCVbfDonP3g -Mz0vGi9fcyFqEWSA8Y+ShXna625HTnReCwFdsu0861qCIq7v95hFFCyOe0iIxpd0 -AKLnl90d+1vonE7andgFgoobbTiMly4UK4H6z8D148fFNIihoteOG3PIF89TFxP7 -CJ/3wXnx/IKpdlO8PAnub3tBPJHvGDj7KORLy4IBxRX5VBAdfGNybE66fcrehEva -rLA4m9pgiaR/Nnr9FdKhPyqYdjflLNvzydxNvMIV4M0hFlhXmYvpMjA5/XsTnsyV -t9JHJa5Upwqsbne08t7rsm7liZNxZlko8xPOTQcCAwEAAaNKMEgwDgYDVR0PAQH/ -BAQDAgCgMAwGA1UdEwEB/wQCMAAwKAYDVR0RBCEwH4ILYm9vdDJkb2NrZXKHBH8A -AAGHBAoAAg+HBMCoO2cwCwYJKoZIhvcNAQELA4IBAQAYoYcDkDWkl73FZ0WnPmAj -LiF7HU95Qg3KyEpFsAJeShSLPPbQntmwhdekEzY4tQ3eKQB/+zHFjzsCr/lmDUmH -Ea/ryQ17C+jyH+Ykg0IWW6L6veZhvRDg6Z9focVtPVBRxPTqC/Qhb54blWRASV+W -UreMuXQ5+1dQptAM7ixOeLVHjBi/bd9TL3jvwBVCr9QedteMjjK4TCF9Tbcou+MF -2w3OJJZMDhcD+YwoK9uJDqlKmcTm/vVMbSsp/pTMcnQ7jxCeR8/XyX+VwTZwaHAa -o92Q/eg3THAiWhvyT/SzyH9dHHBAyXynUwGCggKawHktfvW4QXRPuLxLrJ7iB5cy ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/serverkey.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/serverkey.pem deleted file mode 100644 index c897e5da5..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/serverkey.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEoAIBAAKCAQEA3FQ5SE2eoWetOUuwFjN8VFlZCjbryX7griU4kJVt8Oic/eAz -PS8aL19zIWoRZIDxj5KFedrrbkdOdF4LAV2y7TzrWoIiru/3mEUULI57SIjGl3QA -oueX3R37W+icTtqd2AWCihttOIyXLhQrgfrPwPXjx8U0iKGi144bc8gXz1MXE/sI -n/fBefH8gql2U7w8Ce5ve0E8ke8YOPso5EvLggHFFflUEB18Y3JsTrp9yt6ES9qs -sDib2mCJpH82ev0V0qE/Kph2N+Us2/PJ3E28whXgzSEWWFeZi+kyMDn9exOezJW3 -0kclrlSnCqxud7Ty3uuybuWJk3FmWSjzE85NBwIDAQABAoIBAG0ak+cW8LeShHf7 -3+2Of0GxoOLrAWWdG5uAuPr31CJYve0FybnBimDtDjD8ujIfm/7xmoEWBEFutA3x -x9dcU88gvJbsHEqub9gKVQwfXjMz78tt2SbSMiR/xUnk7QorPcCMMfE71aEMFYzu -1gCed6Rg3vO81t/V0rKVH0j9S7UQz5v/oX15eVDV5LOqyCHwAi6K0eXXbqnbI0TH -SOQ/nexM2msVXWbO9t6ra6f5V7FXziDK5Xi+rPxRbX9mkrDzxDAevfuRqYBx5vtL -W2Q2hKjUAHFgXFniNSZBS7dCdAtz0el/3ct+cNmpuTMhhs7M6wC1CuYiZ/DxLiFh -Si73VckCgYEA+/ceh3+VjtQ0rgEw8sD9bqYEA8IaBiObjneIoFnKBYRG7yZd8JMm -HD4M/aQ1qhcRLPN7GR03YQULgQJURbKSjJHnhfTXHyeHC3NN4gMVHQXewu2MHCh6 -7FCQ9CfK0KcYLgegVVvL3PrF3hyWGnmTu+G0UkDQRYVnaNrB7snrW6UCgYEA39tq -+MCQdu0moJ5szSZf02undg9EeW6isk9qzi7TId3/MLci2eH7PEnipipPUK3+DERq -aba0y0TKgBR2EXvXLFJA/+kfdo2loIEHOfox85HVfxgUaFRti63ZI0uF8D0QT2Yy -oJal+RFghVoSnv4LjhRKEPbIkScTXGjdK+7wFjsCfz79iKRXQQx0ALd/lL0bgkAn -QNmvrNHcFQeI2p8700WNzC39aX67SsvEt3qxkrjzC1gxhpTAuReIK1gVPPwvqHN8 -BmV20FD5kMlMCix2mNCopwgUWvKvLAvoGFTxncKMA39+aJbuXAjiqJTekKgNvOE7 -i9kEWw0GTNPp3JHV6QECgYAPwb0M11kT1euDIMOdyRazpf86kyaJuZzgGjD1ZFxe -JOcigbGFTp/FhZnbglzk2+pm6KXo3QBq0mPCki4hWusxZnTGzpz1VlETNCHTFeZQ -M7KoaIR/N3oie9Et59H8r/+m5xWnMhNqratyl316DX24uXrhKM3DUdHODl+LCR2D -IwKBgE1MbHuwolUPEw3HeO4R7NMFVTFei7E/fpUsimPfArGg8UydwvloNT1myJos -N2JzfGGjN2KPVcBk9fOs71mJ6VcK3C3g5JIccplk6h9VNaw55+zdQvKPTzoBoTvy 
-A+Fwx2AlF61KeRF87DL2YTRJ6B9MHmWgf7+GVZOxomLgEAcZ ------END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go deleted file mode 100644 index 5052e2d05..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go +++ /dev/null @@ -1,1096 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package testing provides a fake implementation of the Docker API, useful for -// testing purposes. -package testing - -import ( - "archive/tar" - "crypto/rand" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - mathrand "math/rand" - "net" - "net/http" - "regexp" - "strconv" - "strings" - "sync" - "time" - - "github.com/fsouza/go-dockerclient" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy" - "github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux" -) - -var nameRegexp = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_.-]+$`) - -// DockerServer represents a programmable, concurrent (not much), HTTP server -// implementing a fake version of the Docker remote API. -// -// It can be used in standalone mode, listening for connections, or as an arbitrary -// HTTP handler. -// -// For more details on the remote API, check http://goo.gl/G3plxW. -type DockerServer struct { - containers []*docker.Container - execs []*docker.ExecInspect - execMut sync.RWMutex - cMut sync.RWMutex - images []docker.Image - iMut sync.RWMutex - imgIDs map[string]string - networks []*docker.Network - netMut sync.RWMutex - listener net.Listener - mux *mux.Router - hook func(*http.Request) - failures map[string]string - multiFailures []map[string]string - execCallbacks map[string]func() - statsCallbacks map[string]func(string) docker.Stats - customHandlers map[string]http.Handler - handlerMutex sync.RWMutex - cChan chan<- *docker.Container -} - -// NewServer returns a new instance of the fake server, in standalone mode. Use -// the method URL to get the URL of the server. -// -// It receives the bind address (use 127.0.0.1:0 for getting an available port -// on the host), a channel of containers and a hook function, that will be -// called on every request. -// -// The fake server will send containers on the channel whenever the container -// changes its state, via the HTTP API (i.e.: create, start and stop). This -// channel may be nil, which means that the server won't notify on state -// changes.
-func NewServer(bind string, containerChan chan<- *docker.Container, hook func(*http.Request)) (*DockerServer, error) { - listener, err := net.Listen("tcp", bind) - if err != nil { - return nil, err - } - server := DockerServer{ - listener: listener, - imgIDs: make(map[string]string), - hook: hook, - failures: make(map[string]string), - execCallbacks: make(map[string]func()), - statsCallbacks: make(map[string]func(string) docker.Stats), - customHandlers: make(map[string]http.Handler), - cChan: containerChan, - } - server.buildMuxer() - go http.Serve(listener, &server) - return &server, nil -} - -func (s *DockerServer) notify(container *docker.Container) { - if s.cChan != nil { - s.cChan <- container - } -} - -func (s *DockerServer) buildMuxer() { - s.mux = mux.NewRouter() - s.mux.Path("/commit").Methods("POST").HandlerFunc(s.handlerWrapper(s.commitContainer)) - s.mux.Path("/containers/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.listContainers)) - s.mux.Path("/containers/create").Methods("POST").HandlerFunc(s.handlerWrapper(s.createContainer)) - s.mux.Path("/containers/{id:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectContainer)) - s.mux.Path("/containers/{id:.*}/rename").Methods("POST").HandlerFunc(s.handlerWrapper(s.renameContainer)) - s.mux.Path("/containers/{id:.*}/top").Methods("GET").HandlerFunc(s.handlerWrapper(s.topContainer)) - s.mux.Path("/containers/{id:.*}/start").Methods("POST").HandlerFunc(s.handlerWrapper(s.startContainer)) - s.mux.Path("/containers/{id:.*}/kill").Methods("POST").HandlerFunc(s.handlerWrapper(s.stopContainer)) - s.mux.Path("/containers/{id:.*}/stop").Methods("POST").HandlerFunc(s.handlerWrapper(s.stopContainer)) - s.mux.Path("/containers/{id:.*}/pause").Methods("POST").HandlerFunc(s.handlerWrapper(s.pauseContainer)) - s.mux.Path("/containers/{id:.*}/unpause").Methods("POST").HandlerFunc(s.handlerWrapper(s.unpauseContainer)) - s.mux.Path("/containers/{id:.*}/wait").Methods("POST").HandlerFunc(s.handlerWrapper(s.waitContainer)) - s.mux.Path("/containers/{id:.*}/attach").Methods("POST").HandlerFunc(s.handlerWrapper(s.attachContainer)) - s.mux.Path("/containers/{id:.*}").Methods("DELETE").HandlerFunc(s.handlerWrapper(s.removeContainer)) - s.mux.Path("/containers/{id:.*}/exec").Methods("POST").HandlerFunc(s.handlerWrapper(s.createExecContainer)) - s.mux.Path("/containers/{id:.*}/stats").Methods("GET").HandlerFunc(s.handlerWrapper(s.statsContainer)) - s.mux.Path("/exec/{id:.*}/resize").Methods("POST").HandlerFunc(s.handlerWrapper(s.resizeExecContainer)) - s.mux.Path("/exec/{id:.*}/start").Methods("POST").HandlerFunc(s.handlerWrapper(s.startExecContainer)) - s.mux.Path("/exec/{id:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectExecContainer)) - s.mux.Path("/images/create").Methods("POST").HandlerFunc(s.handlerWrapper(s.pullImage)) - s.mux.Path("/build").Methods("POST").HandlerFunc(s.handlerWrapper(s.buildImage)) - s.mux.Path("/images/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.listImages)) - s.mux.Path("/images/{id:.*}").Methods("DELETE").HandlerFunc(s.handlerWrapper(s.removeImage)) - s.mux.Path("/images/{name:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectImage)) - s.mux.Path("/images/{name:.*}/push").Methods("POST").HandlerFunc(s.handlerWrapper(s.pushImage)) - s.mux.Path("/images/{name:.*}/tag").Methods("POST").HandlerFunc(s.handlerWrapper(s.tagImage)) - s.mux.Path("/events").Methods("GET").HandlerFunc(s.listEvents) - s.mux.Path("/_ping").Methods("GET").HandlerFunc(s.handlerWrapper(s.pingDocker)) 
- s.mux.Path("/images/load").Methods("POST").HandlerFunc(s.handlerWrapper(s.loadImage)) - s.mux.Path("/images/{id:.*}/get").Methods("GET").HandlerFunc(s.handlerWrapper(s.getImage)) - s.mux.Path("/networks").Methods("GET").HandlerFunc(s.handlerWrapper(s.listNetworks)) - s.mux.Path("/networks/{id:.*}").Methods("GET").HandlerFunc(s.handlerWrapper(s.networkInfo)) - s.mux.Path("/networks").Methods("POST").HandlerFunc(s.handlerWrapper(s.createNetwork)) -} - -// SetHook changes the hook function used by the server. -// -// The hook function is a function called on every request. -func (s *DockerServer) SetHook(hook func(*http.Request)) { - s.hook = hook -} - -// PrepareExec adds a callback to a container exec in the fake server. -// -// This function will be called whenever the given exec id is started, and the -// given exec id will remain in the "Running" start while the function is -// running, so it's useful for emulating an exec that runs for two seconds, for -// example: -// -// opts := docker.CreateExecOptions{ -// AttachStdin: true, -// AttachStdout: true, -// AttachStderr: true, -// Tty: true, -// Cmd: []string{"/bin/bash", "-l"}, -// } -// // Client points to a fake server. -// exec, err := client.CreateExec(opts) -// // handle error -// server.PrepareExec(exec.ID, func() {time.Sleep(2 * time.Second)}) -// err = client.StartExec(exec.ID, docker.StartExecOptions{Tty: true}) // will block for 2 seconds -// // handle error -func (s *DockerServer) PrepareExec(id string, callback func()) { - s.execCallbacks[id] = callback -} - -// PrepareStats adds a callback that will be called for each container stats -// call. -// -// This callback function will be called multiple times if stream is set to -// true when stats is called. -func (s *DockerServer) PrepareStats(id string, callback func(string) docker.Stats) { - s.statsCallbacks[id] = callback -} - -// PrepareFailure adds a new expected failure based on a URL regexp it receives -// an id for the failure. -func (s *DockerServer) PrepareFailure(id string, urlRegexp string) { - s.failures[id] = urlRegexp -} - -// PrepareMultiFailures enqueues a new expected failure based on a URL regexp -// it receives an id for the failure. -func (s *DockerServer) PrepareMultiFailures(id string, urlRegexp string) { - s.multiFailures = append(s.multiFailures, map[string]string{"error": id, "url": urlRegexp}) -} - -// ResetFailure removes an expected failure identified by the given id. -func (s *DockerServer) ResetFailure(id string) { - delete(s.failures, id) -} - -// ResetMultiFailures removes all enqueued failures. -func (s *DockerServer) ResetMultiFailures() { - s.multiFailures = []map[string]string{} -} - -// CustomHandler registers a custom handler for a specific path. -// -// For example: -// -// server.CustomHandler("/containers/json", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { -// http.Error(w, "Something wrong is not right", http.StatusInternalServerError) -// })) -func (s *DockerServer) CustomHandler(path string, handler http.Handler) { - s.handlerMutex.Lock() - s.customHandlers[path] = handler - s.handlerMutex.Unlock() -} - -// MutateContainer changes the state of a container, returning an error if the -// given id does not match to any container "running" in the server. 
-func (s *DockerServer) MutateContainer(id string, state docker.State) error { - for _, container := range s.containers { - if container.ID == id { - container.State = state - return nil - } - } - return errors.New("container not found") -} - -// Stop stops the server. -func (s *DockerServer) Stop() { - if s.listener != nil { - s.listener.Close() - } -} - -// URL returns the HTTP URL of the server. -func (s *DockerServer) URL() string { - if s.listener == nil { - return "" - } - return "http://" + s.listener.Addr().String() + "/" -} - -// ServeHTTP handles HTTP requests sent to the server. -func (s *DockerServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { - s.handlerMutex.RLock() - defer s.handlerMutex.RUnlock() - for re, handler := range s.customHandlers { - if m, _ := regexp.MatchString(re, r.URL.Path); m { - handler.ServeHTTP(w, r) - return - } - } - s.mux.ServeHTTP(w, r) - if s.hook != nil { - s.hook(r) - } -} - -// DefaultHandler returns the default http.Handler mux; it allows customHandlers to -// call the default behavior if wanted. -func (s *DockerServer) DefaultHandler() http.Handler { - return s.mux -} - -func (s *DockerServer) handlerWrapper(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) { - return func(w http.ResponseWriter, r *http.Request) { - for errorID, urlRegexp := range s.failures { - matched, err := regexp.MatchString(urlRegexp, r.URL.Path) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - if !matched { - continue - } - http.Error(w, errorID, http.StatusBadRequest) - return - } - for i, failure := range s.multiFailures { - matched, err := regexp.MatchString(failure["url"], r.URL.Path) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - if !matched { - continue - } - http.Error(w, failure["error"], http.StatusBadRequest) - s.multiFailures = append(s.multiFailures[:i], s.multiFailures[i+1:]...)
- return - } - f(w, r) - } -} - -func (s *DockerServer) listContainers(w http.ResponseWriter, r *http.Request) { - all := r.URL.Query().Get("all") - s.cMut.RLock() - result := make([]docker.APIContainers, 0, len(s.containers)) - for _, container := range s.containers { - if all == "1" || container.State.Running { - result = append(result, docker.APIContainers{ - ID: container.ID, - Image: container.Image, - Command: fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " ")), - Created: container.Created.Unix(), - Status: container.State.String(), - Ports: container.NetworkSettings.PortMappingAPI(), - Names: []string{fmt.Sprintf("/%s", container.Name)}, - }) - } - } - s.cMut.RUnlock() - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(result) -} - -func (s *DockerServer) listImages(w http.ResponseWriter, r *http.Request) { - s.cMut.RLock() - result := make([]docker.APIImages, len(s.images)) - for i, image := range s.images { - result[i] = docker.APIImages{ - ID: image.ID, - Created: image.Created.Unix(), - } - for tag, id := range s.imgIDs { - if id == image.ID { - result[i].RepoTags = append(result[i].RepoTags, tag) - } - } - } - s.cMut.RUnlock() - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(result) -} - -func (s *DockerServer) findImage(id string) (string, error) { - s.iMut.RLock() - defer s.iMut.RUnlock() - image, ok := s.imgIDs[id] - if ok { - return image, nil - } - image, _, err := s.findImageByID(id) - return image, err -} - -func (s *DockerServer) findImageByID(id string) (string, int, error) { - s.iMut.RLock() - defer s.iMut.RUnlock() - for i, image := range s.images { - if image.ID == id { - return image.ID, i, nil - } - } - return "", -1, errors.New("No such image") -} - -func (s *DockerServer) createContainer(w http.ResponseWriter, r *http.Request) { - var config struct { - *docker.Config - HostConfig *docker.HostConfig - } - defer r.Body.Close() - err := json.NewDecoder(r.Body).Decode(&config) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - name := r.URL.Query().Get("name") - if name != "" && !nameRegexp.MatchString(name) { - http.Error(w, "Invalid container name", http.StatusInternalServerError) - return - } - if _, err := s.findImage(config.Image); err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - ports := map[docker.Port][]docker.PortBinding{} - for port := range config.ExposedPorts { - ports[port] = []docker.PortBinding{{ - HostIP: "0.0.0.0", - HostPort: strconv.Itoa(mathrand.Int() % 65536), - }} - } - - //the container may not have cmd when using a Dockerfile - var path string - var args []string - if len(config.Cmd) == 1 { - path = config.Cmd[0] - } else if len(config.Cmd) > 1 { - path = config.Cmd[0] - args = config.Cmd[1:] - } - - generatedID := s.generateID() - config.Config.Hostname = generatedID[:12] - container := docker.Container{ - Name: name, - ID: generatedID, - Created: time.Now(), - Path: path, - Args: args, - Config: config.Config, - HostConfig: config.HostConfig, - State: docker.State{ - Running: false, - Pid: mathrand.Int() % 50000, - ExitCode: 0, - StartedAt: time.Now(), - }, - Image: config.Image, - NetworkSettings: &docker.NetworkSettings{ - IPAddress: fmt.Sprintf("172.16.42.%d", mathrand.Int()%250+2), - IPPrefixLen: 24, - Gateway: "172.16.42.1", - Bridge: "docker0", - Ports: ports, - }, - } - s.cMut.Lock() - if container.Name != "" { - for _, c := 
range s.containers { - if c.Name == container.Name { - defer s.cMut.Unlock() - http.Error(w, "there's already a container with this name", http.StatusConflict) - return - } - } - } - s.containers = append(s.containers, &container) - s.cMut.Unlock() - w.WriteHeader(http.StatusCreated) - s.notify(&container) - var c = struct{ ID string }{ID: container.ID} - json.NewEncoder(w).Encode(c) -} - -func (s *DockerServer) generateID() string { - var buf [16]byte - rand.Read(buf[:]) - return fmt.Sprintf("%x", buf) -} - -func (s *DockerServer) renameContainer(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - container, index, err := s.findContainer(id) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - copy := *container - copy.Name = r.URL.Query().Get("name") - s.cMut.Lock() - defer s.cMut.Unlock() - if s.containers[index].ID == copy.ID { - s.containers[index] = &copy - } - w.WriteHeader(http.StatusNoContent) -} - -func (s *DockerServer) inspectContainer(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - container, _, err := s.findContainer(id) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(container) -} - -func (s *DockerServer) statsContainer(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - _, _, err := s.findContainer(id) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - stream, _ := strconv.ParseBool(r.URL.Query().Get("stream")) - callback := s.statsCallbacks[id] - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - encoder := json.NewEncoder(w) - for { - var stats docker.Stats - if callback != nil { - stats = callback(id) - } - encoder.Encode(stats) - if !stream { - break - } - } -} - -func (s *DockerServer) topContainer(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - container, _, err := s.findContainer(id) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - if !container.State.Running { - w.WriteHeader(http.StatusInternalServerError) - fmt.Fprintf(w, "Container %s is not running", id) - return - } - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - result := docker.TopResult{ - Titles: []string{"UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"}, - Processes: [][]string{ - {"root", "7535", "7516", "0", "03:20", "?", "00:00:00", container.Path + " " + strings.Join(container.Args, " ")}, - }, - } - json.NewEncoder(w).Encode(result) -} - -func (s *DockerServer) startContainer(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - container, _, err := s.findContainer(id) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - s.cMut.Lock() - defer s.cMut.Unlock() - defer r.Body.Close() - var hostConfig docker.HostConfig - err = json.NewDecoder(r.Body).Decode(&hostConfig) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - container.HostConfig = &hostConfig - if container.State.Running { - http.Error(w, "", http.StatusNotModified) - return - } - container.State.Running = true - s.notify(container) -} - -func (s *DockerServer) stopContainer(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - container, _, err := s.findContainer(id) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - 
s.cMut.Lock() - defer s.cMut.Unlock() - if !container.State.Running { - http.Error(w, "Container not running", http.StatusBadRequest) - return - } - w.WriteHeader(http.StatusNoContent) - container.State.Running = false - s.notify(container) -} - -func (s *DockerServer) pauseContainer(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - container, _, err := s.findContainer(id) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - s.cMut.Lock() - defer s.cMut.Unlock() - if container.State.Paused { - http.Error(w, "Container already paused", http.StatusBadRequest) - return - } - w.WriteHeader(http.StatusNoContent) - container.State.Paused = true -} - -func (s *DockerServer) unpauseContainer(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - container, _, err := s.findContainer(id) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - s.cMut.Lock() - defer s.cMut.Unlock() - if !container.State.Paused { - http.Error(w, "Container not paused", http.StatusBadRequest) - return - } - w.WriteHeader(http.StatusNoContent) - container.State.Paused = false -} - -func (s *DockerServer) attachContainer(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - container, _, err := s.findContainer(id) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - hijacker, ok := w.(http.Hijacker) - if !ok { - http.Error(w, "cannot hijack connection", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/vnd.docker.raw-stream") - w.WriteHeader(http.StatusOK) - conn, _, err := hijacker.Hijack() - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - wg := sync.WaitGroup{} - if r.URL.Query().Get("stdin") == "1" { - wg.Add(1) - go func() { - ioutil.ReadAll(conn) - wg.Done() - }() - } - outStream := stdcopy.NewStdWriter(conn, stdcopy.Stdout) - if container.State.Running { - fmt.Fprintf(outStream, "Container is running\n") - } else { - fmt.Fprintf(outStream, "Container is not running\n") - } - fmt.Fprintln(outStream, "What happened?") - fmt.Fprintln(outStream, "Something happened") - wg.Wait() - if r.URL.Query().Get("stream") == "1" { - for { - time.Sleep(1e6) - s.cMut.RLock() - if !container.State.Running { - s.cMut.RUnlock() - break - } - s.cMut.RUnlock() - } - } - conn.Close() -} - -func (s *DockerServer) waitContainer(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - container, _, err := s.findContainer(id) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - for { - time.Sleep(1e6) - s.cMut.RLock() - if !container.State.Running { - s.cMut.RUnlock() - break - } - s.cMut.RUnlock() - } - result := map[string]int{"StatusCode": container.State.ExitCode} - json.NewEncoder(w).Encode(result) -} - -func (s *DockerServer) removeContainer(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - force := r.URL.Query().Get("force") - _, index, err := s.findContainer(id) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - if s.containers[index].State.Running && force != "1" { - msg := "Error: API error (406): Impossible to remove a running container, please stop it first" - http.Error(w, msg, http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusNoContent) - s.cMut.Lock() - defer s.cMut.Unlock() - s.containers[index] = s.containers[len(s.containers)-1] - s.containers = s.containers[:len(s.containers)-1] -} - -func 
(s *DockerServer) commitContainer(w http.ResponseWriter, r *http.Request) { - id := r.URL.Query().Get("container") - container, _, err := s.findContainer(id) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - var config *docker.Config - runConfig := r.URL.Query().Get("run") - if runConfig != "" { - config = new(docker.Config) - err = json.Unmarshal([]byte(runConfig), config) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - } - w.WriteHeader(http.StatusOK) - image := docker.Image{ - ID: "img-" + container.ID, - Parent: container.Image, - Container: container.ID, - Comment: r.URL.Query().Get("m"), - Author: r.URL.Query().Get("author"), - Config: config, - } - repository := r.URL.Query().Get("repo") - tag := r.URL.Query().Get("tag") - s.iMut.Lock() - s.images = append(s.images, image) - if repository != "" { - if tag != "" { - repository += ":" + tag - } - s.imgIDs[repository] = image.ID - } - s.iMut.Unlock() - fmt.Fprintf(w, `{"ID":%q}`, image.ID) -} - -func (s *DockerServer) findContainer(idOrName string) (*docker.Container, int, error) { - s.cMut.RLock() - defer s.cMut.RUnlock() - for i, container := range s.containers { - if container.ID == idOrName || container.Name == idOrName { - return container, i, nil - } - } - return nil, -1, errors.New("No such container") -} - -func (s *DockerServer) buildImage(w http.ResponseWriter, r *http.Request) { - if ct := r.Header.Get("Content-Type"); ct == "application/tar" { - gotDockerFile := false - tr := tar.NewReader(r.Body) - for { - header, err := tr.Next() - if err != nil { - break - } - if header.Name == "Dockerfile" { - gotDockerFile = true - } - } - if !gotDockerFile { - w.WriteHeader(http.StatusBadRequest) - w.Write([]byte("miss Dockerfile")) - return - } - } - // we did not use that Dockerfile to build the image because we are a fake Docker daemon - image := docker.Image{ - ID: s.generateID(), - Created: time.Now(), - } - - query := r.URL.Query() - repository := image.ID - if t := query.Get("t"); t != "" { - repository = t - } - s.iMut.Lock() - s.images = append(s.images, image) - s.imgIDs[repository] = image.ID - s.iMut.Unlock() - w.Write([]byte(fmt.Sprintf("Successfully built %s", image.ID))) -} - -func (s *DockerServer) pullImage(w http.ResponseWriter, r *http.Request) { - fromImageName := r.URL.Query().Get("fromImage") - tag := r.URL.Query().Get("tag") - image := docker.Image{ - ID: s.generateID(), - } - s.iMut.Lock() - s.images = append(s.images, image) - if fromImageName != "" { - if tag != "" { - fromImageName = fmt.Sprintf("%s:%s", fromImageName, tag) - } - s.imgIDs[fromImageName] = image.ID - } - s.iMut.Unlock() -} - -func (s *DockerServer) pushImage(w http.ResponseWriter, r *http.Request) { - name := mux.Vars(r)["name"] - tag := r.URL.Query().Get("tag") - if tag != "" { - name += ":" + tag - } - s.iMut.RLock() - if _, ok := s.imgIDs[name]; !ok { - s.iMut.RUnlock() - http.Error(w, "No such image", http.StatusNotFound) - return - } - s.iMut.RUnlock() - fmt.Fprintln(w, "Pushing...") - fmt.Fprintln(w, "Pushed") -} - -func (s *DockerServer) tagImage(w http.ResponseWriter, r *http.Request) { - name := mux.Vars(r)["name"] - s.iMut.RLock() - if _, ok := s.imgIDs[name]; !ok { - s.iMut.RUnlock() - http.Error(w, "No such image", http.StatusNotFound) - return - } - s.iMut.RUnlock() - s.iMut.Lock() - defer s.iMut.Unlock() - newRepo := r.URL.Query().Get("repo") - newTag := r.URL.Query().Get("tag") - if newTag != "" { - newRepo += ":" + newTag - } - s.imgIDs[newRepo] = s.imgIDs[name] - 
w.WriteHeader(http.StatusCreated) -} - -func (s *DockerServer) removeImage(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - s.iMut.RLock() - var tag string - if img, ok := s.imgIDs[id]; ok { - id, tag = img, id - } - var tags []string - for tag, taggedID := range s.imgIDs { - if taggedID == id { - tags = append(tags, tag) - } - } - s.iMut.RUnlock() - _, index, err := s.findImageByID(id) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - w.WriteHeader(http.StatusNoContent) - s.iMut.Lock() - defer s.iMut.Unlock() - if len(tags) < 2 { - s.images[index] = s.images[len(s.images)-1] - s.images = s.images[:len(s.images)-1] - } - if tag != "" { - delete(s.imgIDs, tag) - } -} - -func (s *DockerServer) inspectImage(w http.ResponseWriter, r *http.Request) { - name := mux.Vars(r)["name"] - s.iMut.RLock() - defer s.iMut.RUnlock() - if id, ok := s.imgIDs[name]; ok { - for _, img := range s.images { - if img.ID == id { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(img) - return - } - } - } - http.Error(w, "not found", http.StatusNotFound) -} - -func (s *DockerServer) listEvents(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - var events [][]byte - count := mathrand.Intn(20) - for i := 0; i < count; i++ { - data, err := json.Marshal(s.generateEvent()) - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - return - } - events = append(events, data) - } - w.WriteHeader(http.StatusOK) - for _, d := range events { - fmt.Fprintln(w, d) - time.Sleep(time.Duration(mathrand.Intn(200)) * time.Millisecond) - } -} - -func (s *DockerServer) pingDocker(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) -} - -func (s *DockerServer) generateEvent() *docker.APIEvents { - var eventType string - switch mathrand.Intn(4) { - case 0: - eventType = "create" - case 1: - eventType = "start" - case 2: - eventType = "stop" - case 3: - eventType = "destroy" - } - return &docker.APIEvents{ - ID: s.generateID(), - Status: eventType, - From: "mybase:latest", - Time: time.Now().Unix(), - } -} - -func (s *DockerServer) loadImage(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) -} - -func (s *DockerServer) getImage(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - w.Header().Set("Content-Type", "application/tar") -} - -func (s *DockerServer) createExecContainer(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - container, _, err := s.findContainer(id) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - - execID := s.generateID() - container.ExecIDs = append(container.ExecIDs, execID) - - exec := docker.ExecInspect{ - ID: execID, - Container: *container, - } - - var params docker.CreateExecOptions - err = json.NewDecoder(r.Body).Decode(&params) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - if len(params.Cmd) > 0 { - exec.ProcessConfig.EntryPoint = params.Cmd[0] - if len(params.Cmd) > 1 { - exec.ProcessConfig.Arguments = params.Cmd[1:] - } - } - - exec.ProcessConfig.User = params.User - exec.ProcessConfig.Tty = params.Tty - - s.execMut.Lock() - s.execs = append(s.execs, &exec) - s.execMut.Unlock() - w.WriteHeader(http.StatusOK) - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(map[string]string{"Id": exec.ID}) -} - -func (s *DockerServer) startExecContainer(w 
http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - if exec, err := s.getExec(id, false); err == nil { - s.execMut.Lock() - exec.Running = true - s.execMut.Unlock() - if callback, ok := s.execCallbacks[id]; ok { - callback() - delete(s.execCallbacks, id) - } else if callback, ok := s.execCallbacks["*"]; ok { - callback() - delete(s.execCallbacks, "*") - } - s.execMut.Lock() - exec.Running = false - s.execMut.Unlock() - w.WriteHeader(http.StatusOK) - return - } - w.WriteHeader(http.StatusNotFound) -} - -func (s *DockerServer) resizeExecContainer(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - if _, err := s.getExec(id, false); err == nil { - w.WriteHeader(http.StatusOK) - return - } - w.WriteHeader(http.StatusNotFound) -} - -func (s *DockerServer) inspectExecContainer(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - if exec, err := s.getExec(id, true); err == nil { - w.WriteHeader(http.StatusOK) - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(exec) - return - } - w.WriteHeader(http.StatusNotFound) -} - -func (s *DockerServer) getExec(id string, copy bool) (*docker.ExecInspect, error) { - s.execMut.RLock() - defer s.execMut.RUnlock() - for _, exec := range s.execs { - if exec.ID == id { - if copy { - cp := *exec - exec = &cp - } - return exec, nil - } - } - return nil, errors.New("exec not found") -} - -func (s *DockerServer) findNetwork(idOrName string) (*docker.Network, int, error) { - s.netMut.RLock() - defer s.netMut.RUnlock() - for i, network := range s.networks { - if network.ID == idOrName || network.Name == idOrName { - return network, i, nil - } - } - return nil, -1, errors.New("No such network") -} - -func (s *DockerServer) listNetworks(w http.ResponseWriter, r *http.Request) { - s.netMut.RLock() - result := make([]docker.Network, 0, len(s.networks)) - for _, network := range s.networks { - result = append(result, *network) - } - s.netMut.RUnlock() - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(result) -} - -func (s *DockerServer) networkInfo(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - network, _, err := s.findNetwork(id) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(network) -} - -// isValidName validates configuration objects supported by libnetwork -func isValidName(name string) bool { - if name == "" || strings.Contains(name, ".") { - return false - } - return true -} - -func (s *DockerServer) createNetwork(w http.ResponseWriter, r *http.Request) { - var config *docker.CreateNetworkOptions - defer r.Body.Close() - err := json.NewDecoder(r.Body).Decode(&config) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - if !isValidName(config.Name) { - http.Error(w, "Invalid network name", http.StatusBadRequest) - return - } - if n, _, _ := s.findNetwork(config.Name); n != nil { - http.Error(w, "network already exists", http.StatusForbidden) - return - } - - generatedID := s.generateID() - network := docker.Network{ - Name: config.Name, - ID: generatedID, - Type: config.NetworkType, - } - s.netMut.Lock() - s.networks = append(s.networks, &network) - s.netMut.Unlock() - w.WriteHeader(http.StatusCreated) - var c = struct{ ID string }{ID: network.ID} - json.NewEncoder(w).Encode(c) -} diff --git 
a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.go deleted file mode 100644 index 55f43174b..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// The content is borrowed from Docker's own source code to provide a simple -// TLS-based dialer - -package docker - -import ( - "crypto/tls" - "errors" - "net" - "strings" - "time" -) - -type tlsClientCon struct { - *tls.Conn - rawConn net.Conn } - -func (c *tlsClientCon) CloseWrite() error { - // Go's standard tls.Conn doesn't provide the CloseWrite() method, so we do it - // on its underlying connection. - if cwc, ok := c.rawConn.(interface { - CloseWrite() error - }); ok { - return cwc.CloseWrite() - } - return nil -} - -func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { - // We want the Timeout and Deadline values from dialer to cover the - // whole process: TCP connection and TLS handshake. This means that we - // also need to start our own timers now. - timeout := dialer.Timeout - - if !dialer.Deadline.IsZero() { - deadlineTimeout := dialer.Deadline.Sub(time.Now()) - if timeout == 0 || deadlineTimeout < timeout { - timeout = deadlineTimeout - } - } - - var errChannel chan error - - if timeout != 0 { - errChannel = make(chan error, 2) - time.AfterFunc(timeout, func() { - errChannel <- errors.New("") - }) - } - - rawConn, err := dialer.Dial(network, addr) - if err != nil { - return nil, err - } - - colonPos := strings.LastIndex(addr, ":") - if colonPos == -1 { - colonPos = len(addr) - } - hostname := addr[:colonPos] - - // If no ServerName is set, infer the ServerName - // from the hostname we're connecting to. - if config.ServerName == "" { - // Make a copy to avoid polluting argument or default. - c := *config - c.ServerName = hostname - config = &c - } - - conn := tls.Client(rawConn, config) - - if timeout == 0 { - err = conn.Handshake() - } else { - go func() { - errChannel <- conn.Handshake() - }() - - err = <-errChannel - } - - if err != nil { - rawConn.Close() - return nil, err - } - - // This is where Docker differs from the standard crypto/tls package: it returns a - // wrapper which holds both the TLS and raw connections. - return &tlsClientCon{conn, rawConn}, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/volume.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/volume.go deleted file mode 100644 index a989a6eee..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/volume.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "errors" - "net/http" -) - -var ( - // ErrNoSuchVolume is the error returned when the volume does not exist. - ErrNoSuchVolume = errors.New("no such volume") - - // ErrVolumeInUse is the error returned when the volume requested to be removed is still in use. - ErrVolumeInUse = errors.New("volume in use and cannot be removed") -) - -// Volume represents a volume. -// -// See https://goo.gl/FZA4BK for more details. 
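A usage sketch of the volume API declared in the remainder of this file; it assumes a configured `*docker.Client` named `client` plus a `"log"` import, and the volume name is hypothetical:

```go
// Sketch only: create, list, and remove a volume through the client API below.
func roundTripVolume(client *docker.Client) error {
	vol, err := client.CreateVolume(docker.CreateVolumeOptions{
		Name:   "scratch", // hypothetical name
		Driver: "local",
	})
	if err != nil {
		return err
	}
	vols, err := client.ListVolumes(docker.ListVolumesOptions{})
	if err != nil {
		return err
	}
	log.Printf("%d volumes; created %q at %q", len(vols), vol.Name, vol.Mountpoint)
	// RemoveVolume maps 404/409 responses to ErrNoSuchVolume / ErrVolumeInUse.
	return client.RemoveVolume(vol.Name)
}
```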
-type Volume struct { - Name string `json:"Name" yaml:"Name"` - Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty"` - Mountpoint string `json:"Mountpoint,omitempty" yaml:"Mountpoint,omitempty"` -} - -// ListVolumesOptions specify parameters to the ListVolumes function. -// -// See https://goo.gl/FZA4BK for more details. -type ListVolumesOptions struct { - Filters map[string][]string -} - -// ListVolumes returns a list of available volumes in the server. -// -// See https://goo.gl/FZA4BK for more details. -func (c *Client) ListVolumes(opts ListVolumesOptions) ([]Volume, error) { - resp, err := c.do("GET", "/volumes?"+queryString(opts), doOptions{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - m := make(map[string]interface{}) - if err := json.NewDecoder(resp.Body).Decode(&m); err != nil { - return nil, err - } - var volumes []Volume - volumesJSON, ok := m["Volumes"] - if !ok { - return volumes, nil - } - data, err := json.Marshal(volumesJSON) - if err != nil { - return nil, err - } - if err := json.Unmarshal(data, &volumes); err != nil { - return nil, err - } - return volumes, nil -} - -// CreateVolumeOptions specify parameters to the CreateVolume function. -// -// See https://goo.gl/pBUbZ9 for more details. -type CreateVolumeOptions struct { - Name string - Driver string - DriverOpts map[string]string -} - -// CreateVolume creates a volume on the server. -// -// See https://goo.gl/pBUbZ9 for more details. -func (c *Client) CreateVolume(opts CreateVolumeOptions) (*Volume, error) { - resp, err := c.do("POST", "/volumes", doOptions{data: opts}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var volume Volume - if err := json.NewDecoder(resp.Body).Decode(&volume); err != nil { - return nil, err - } - return &volume, nil -} - -// InspectVolume returns a volume by its name. -// -// See https://goo.gl/0g9A6i for more details. -func (c *Client) InspectVolume(name string) (*Volume, error) { - resp, err := c.do("GET", "/volumes/"+name, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { - return nil, ErrNoSuchVolume - } - return nil, err - } - defer resp.Body.Close() - var volume Volume - if err := json.NewDecoder(resp.Body).Decode(&volume); err != nil { - return nil, err - } - return &volume, nil -} - -// RemoveVolume removes a volume by its name. -// -// See https://goo.gl/79GNQz for more details. -func (c *Client) RemoveVolume(name string) error { - resp, err := c.do("DELETE", "/volumes/"+name, doOptions{}) - if err != nil { - if e, ok := err.(*Error); ok { - if e.Status == http.StatusNotFound { - return ErrNoSuchVolume - } - if e.Status == http.StatusConflict { - return ErrVolumeInUse - } - } - return nil - } - defer resp.Body.Close() - return nil -} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.gitignore b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.gitignore deleted file mode 100644 index ba8e0cb3a..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -.DS_Store -.DS_Store? -._* -.Spotlight-V100 -.Trashes -Icon? 
-ehthumbs.db -Thumbs.db diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.travis.yml b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.travis.yml deleted file mode 100644 index 2f4e3c2f0..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -sudo: false -language: go -go: - - 1.2 - - 1.3 - - 1.4 - - tip - -before_script: - - mysql -e 'create database gotest;' diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/AUTHORS b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/AUTHORS deleted file mode 100644 index 6fc4c6f7b..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/AUTHORS +++ /dev/null @@ -1,44 +0,0 @@ -# This is the official list of Go-MySQL-Driver authors for copyright purposes. - -# If you are submitting a patch, please add your name or the name of the -# organization which holds the copyright to this list in alphabetical order. - -# Names should be added to this file as -# Name <email address> -# The email address is not required for organizations. -# Please keep the list sorted. - - -# Individual Persons - -Aaron Hopkins -Arne Hormann -Carlos Nieto -Chris Moos -DisposaBoy -Frederick Mayle -Gustavo Kristic -Hanno Braun -Henri Yandell -INADA Naoki -James Harr -Jian Zhen -Joshua Prunier -Julien Schmidt -Kamil Dziedzic -Leonardo YongUk Kim -Lucas Liu -Luke Scott -Michael Woolnough -Nicola Peduzzi -Runrioter Wung -Soroush Pour -Stan Putrya -Xiaobing Jiang -Xiuming Chen - -# Organizations - -Barracuda Networks, Inc. -Google Inc. -Stripe Inc. diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CHANGELOG.md b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CHANGELOG.md deleted file mode 100644 index 161ad0fcc..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CHANGELOG.md +++ /dev/null @@ -1,92 +0,0 @@ -## HEAD - -Changes: - - - Go 1.1 is no longer supported - - Use decimals field from MySQL to format time types (#249) - - Buffer optimizations (#269) - - TLS ServerName defaults to the host (#283) - -Bugfixes: - - - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249) - - Fixed handling of queries without columns and rows (#255) - - Fixed a panic when SetKeepAlive() failed (#298) - -New Features: - - Support for returning table alias on Columns() (#289) - - Placeholder interpolation, can be activated with the DSN parameter `interpolateParams=true` (#309, #318) - - -## Version 1.2 (2014-06-03) - -Changes: - - - We switched back to a "rolling release". `go get` installs the current master branch again - - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver - - Exported errors to allow easy checking from application code - - Enabled TCP Keepalives on TCP connections - - Optimized INFILE handling (better buffer size calculation, lazy init, ...) - - The DSN parser also checks for a missing separating slash - - Faster binary date / datetime to string formatting - - Also exported the MySQLWarning type - - mysqlConn.Close returns the first error encountered instead of ignoring all errors - - writePacket() automatically writes the packet size to the header - - readPacket() uses an iterative approach instead of the recursive approach to merge split packets - -New Features: - - - `RegisterDial` allows the usage of a custom dial function to establish the network connection - - Setting the connection collation is possible with the `collation` DSN parameter. 
This parameter should be preferred over the `charset` parameter - - Logging of critical errors is configurable with `SetLogger` - - Google CloudSQL support - -Bugfixes: - - - Allow more than 32 parameters in prepared statements - - Various old_password fixes - - Fixed TestConcurrent test to pass Go's race detection - - Fixed appendLengthEncodedInteger for large numbers - - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo) - - -## Version 1.1 (2013-11-02) - -Changes: - - - Go-MySQL-Driver now requires Go 1.1 - - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore - - Made closing rows and connections error tolerant. This allows, for example, deferring rows.Close() without checking for errors - - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")` - - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'. - - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries - - Optimized the buffer for reading - - stmt.Query now caches column metadata - - New Logo - - Changed the copyright header to include all contributors - - Improved the LOAD INFILE documentation - - The driver struct is now exported to make the driver directly accessible - - Refactored the driver tests - - Added more benchmarks and moved all to a separate file - - Other small refactoring - -New Features: - - - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure - - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs - - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used - -Bugfixes: - - - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification - - Convert to DB timezone when inserting `time.Time` - - Split packets (more than 16MB) are now merged correctly - - Fixed false positive `io.EOF` errors when the data was fully read - - Avoid panics on reuse of closed connections - - Fixed empty string producing false nil values - - Fixed sign byte for positive TIME fields - - -## Version 1.0 (2013-05-14) - -Initial Release diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CONTRIBUTING.md deleted file mode 100644 index f87c19824..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CONTRIBUTING.md +++ /dev/null @@ -1,40 +0,0 @@ -# Contributing Guidelines - -## Reporting Issues - -Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed). - -Please provide the following minimum information: -* Your Go-MySQL-Driver version (or git SHA) -* Your Go version (run `go version` in your console) -* A detailed issue description -* Error Log if present -* If possible, a short example - - -## Contributing Code - -By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file. 
-Don't forget to add yourself to the AUTHORS file. - -### Pull Requests Checklist - -Please check the following points before submitting your pull request: -- [x] Code compiles correctly -- [x] Created tests, if possible -- [x] All tests pass -- [x] Extended the README / documentation, if necessary -- [x] Added yourself to the AUTHORS file - -### Code Review - -Everyone is invited to review and comment on pull requests. -If it looks fine to you, comment with "LGTM" (Looks good to me). - -If changes are required, notify the reviewers with "PTAL" (Please take another look) after committing the fixes. - -Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM". - -## Development Ideas - -If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page. diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/LICENSE b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/LICENSE deleted file mode 100644 index 14e2f777f..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/LICENSE +++ /dev/null @@ -1,373 +0,0 @@ -Mozilla Public License Version 2.0 -================================== - -1. Definitions --------------- - -1.1. "Contributor" - means each individual or legal entity that creates, contributes to - the creation of, or owns Covered Software. - -1.2. "Contributor Version" - means the combination of the Contributions of others (if any) used - by a Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - means Source Code Form to which the initial Contributor has attached - the notice in Exhibit A, the Executable Form of such Source Code - Form, and Modifications of such Source Code Form, in each case - including portions thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - (a) that the initial Contributor has attached the notice described - in Exhibit B to the Covered Software; or - - (b) that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the - terms of a Secondary License. - -1.6. "Executable Form" - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - means a work that combines Covered Software with other material, in - a separate file or files, that is not Covered Software. - -1.8. "License" - means this document. - -1.9. "Licensable" - means having the right to grant, to the maximum extent possible, - whether at the time of the initial grant or subsequently, any and - all of the rights conveyed by this License. - -1.10. "Modifications" - means any of the following: - - (a) any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered - Software; or - - (b) any new file in Source Code Form that contains any Covered - Software. - -1.11. "Patent Claims" of a Contributor - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the - License, by the making, using, selling, offering for sale, having - made, import, or transfer of either its Contributions or its - Contributor Version. - -1.12. 
"Secondary License" - means either the GNU General Public License, Version 2.0, the GNU - Lesser General Public License, Version 2.1, the GNU Affero General - Public License, Version 3.0, or any later versions of those - licenses. - -1.13. "Source Code Form" - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants and Conditions --------------------------------- - -2.1. Grants - -Each Contributor hereby grants You a world-wide, royalty-free, -non-exclusive license: - -(a) under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - -(b) under Patent Claims of such Contributor to make, use, sell, offer - for sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - -The licenses granted in Section 2.1 with respect to any Contribution -become effective for each Contribution on the date the Contributor first -distributes such Contribution. - -2.3. Limitations on Grant Scope - -The licenses granted in this Section 2 are the only rights granted under -this License. No additional rights or licenses will be implied from the -distribution or licensing of Covered Software under this License. -Notwithstanding Section 2.1(b) above, no patent license is granted by a -Contributor: - -(a) for any code that a Contributor has removed from Covered Software; - or - -(b) for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - -(c) under Patent Claims infringed by Covered Software in the absence of - its Contributions. - -This License does not grant any rights in the trademarks, service marks, -or logos of any Contributor (except as may be necessary to comply with -the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - -No Contributor makes additional grants as a result of Your choice to -distribute the Covered Software under a subsequent version of this -License (see Section 10.2) or under the terms of a Secondary License (if -permitted under the terms of Section 3.3). - -2.5. Representation - -Each Contributor represents that the Contributor believes its -Contributions are its original creation(s) or it has sufficient rights -to grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - -This License is not intended to limit any rights You have under -applicable copyright doctrines of fair use, fair dealing, or other -equivalents. - -2.7. Conditions - -Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted -in Section 2.1. - -3. Responsibilities -------------------- - -3.1. 
Distribution of Source Form - -All distribution of Covered Software in Source Code Form, including any -Modifications that You create or to which You contribute, must be under -the terms of this License. You must inform recipients that the Source -Code Form of the Covered Software is governed by the terms of this -License, and how they can obtain a copy of this License. You may not -attempt to alter or restrict the recipients' rights in the Source Code -Form. - -3.2. Distribution of Executable Form - -If You distribute Covered Software in Executable Form then: - -(a) such Covered Software must also be made available in Source Code - Form, as described in Section 3.1, and You must inform recipients of - the Executable Form how they can obtain a copy of such Source Code - Form by reasonable means in a timely manner, at a charge no more - than the cost of distribution to the recipient; and - -(b) You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter - the recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - -You may create and distribute a Larger Work under terms of Your choice, -provided that You also comply with the requirements of this License for -the Covered Software. If the Larger Work is a combination of Covered -Software with a work governed by one or more Secondary Licenses, and the -Covered Software is not Incompatible With Secondary Licenses, this -License permits You to additionally distribute such Covered Software -under the terms of such Secondary License(s), so that the recipient of -the Larger Work may, at their option, further distribute the Covered -Software under the terms of either this License or such Secondary -License(s). - -3.4. Notices - -You may not remove or alter the substance of any license notices -(including copyright notices, patent notices, disclaimers of warranty, -or limitations of liability) contained within the Source Code Form of -the Covered Software, except that You may alter any license notices to -the extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - -You may choose to offer, and to charge a fee for, warranty, support, -indemnity or liability obligations to one or more recipients of Covered -Software. However, You may do so only on Your own behalf, and not on -behalf of any Contributor. You must make it absolutely clear that any -such warranty, support, indemnity, or liability obligation is offered by -You alone, and You hereby agree to indemnify every Contributor for any -liability incurred by such Contributor as a result of warranty, support, -indemnity or liability terms You offer. You may include additional -disclaimers of warranty and limitations of liability specific to any -jurisdiction. - -4. Inability to Comply Due to Statute or Regulation ---------------------------------------------------- - -If it is impossible for You to comply with any of the terms of this -License with respect to some or all of the Covered Software due to -statute, judicial order, or regulation then You must: (a) comply with -the terms of this License to the maximum extent possible; and (b) -describe the limitations and the code they affect. Such description must -be placed in a text file included with all distributions of the Covered -Software under this License. 
Except to the extent prohibited by statute -or regulation, such description must be sufficiently detailed for a -recipient of ordinary skill to be able to understand it. - -5. Termination --------------- - -5.1. The rights granted under this License will terminate automatically -if You fail to comply with any of its terms. However, if You become -compliant, then the rights granted under this License from a particular -Contributor are reinstated (a) provisionally, unless and until such -Contributor explicitly and finally terminates Your grants, and (b) on an -ongoing basis, if such Contributor fails to notify You of the -non-compliance by some reasonable means prior to 60 days after You have -come back into compliance. Moreover, Your grants from a particular -Contributor are reinstated on an ongoing basis if such Contributor -notifies You of the non-compliance by some reasonable means, this is the -first time You have received notice of non-compliance with this License -from such Contributor, and You become compliant prior to 30 days after -Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent -infringement claim (excluding declaratory judgment actions, -counter-claims, and cross-claims) alleging that a Contributor Version -directly or indirectly infringes any patent, then the rights granted to -You by any and all Contributors for the Covered Software under Section -2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all -end user license agreements (excluding distributors and resellers) which -have been validly granted by You or Your distributors under this License -prior to termination shall survive termination. - -************************************************************************ -* * -* 6. Disclaimer of Warranty * -* ------------------------- * -* * -* Covered Software is provided under this License on an "as is" * -* basis, without warranty of any kind, either expressed, implied, or * -* statutory, including, without limitation, warranties that the * -* Covered Software is free of defects, merchantable, fit for a * -* particular purpose or non-infringing. The entire risk as to the * -* quality and performance of the Covered Software is with You. * -* Should any Covered Software prove defective in any respect, You * -* (not any Contributor) assume the cost of any necessary servicing, * -* repair, or correction. This disclaimer of warranty constitutes an * -* essential part of this License. No use of any Covered Software is * -* authorized under this License except under this disclaimer. * -* * -************************************************************************ - -************************************************************************ -* * -* 7. Limitation of Liability * -* -------------------------- * -* * -* Under no circumstances and under no legal theory, whether tort * -* (including negligence), contract, or otherwise, shall any * -* Contributor, or anyone who distributes Covered Software as * -* permitted above, be liable to You for any direct, indirect, * -* special, incidental, or consequential damages of any character * -* including, without limitation, damages for lost profits, loss of * -* goodwill, work stoppage, computer failure or malfunction, or any * -* and all other commercial damages or losses, even if such party * -* shall have been informed of the possibility of such damages. 
This * -* limitation of liability shall not apply to liability for death or * -* personal injury resulting from such party's negligence to the * -* extent applicable law prohibits such limitation. Some * -* jurisdictions do not allow the exclusion or limitation of * -* incidental or consequential damages, so this exclusion and * -* limitation may not apply to You. * -* * -************************************************************************ - -8. Litigation -------------- - -Any litigation relating to this License may be brought only in the -courts of a jurisdiction where the defendant maintains its principal -place of business and such litigation shall be governed by laws of that -jurisdiction, without reference to its conflict-of-law provisions. -Nothing in this Section shall prevent a party's ability to bring -cross-claims or counter-claims. - -9. Miscellaneous ----------------- - -This License represents the complete agreement concerning the subject -matter hereof. If any provision of this License is held to be -unenforceable, such provision shall be reformed only to the extent -necessary to make it enforceable. Any law or regulation which provides -that the language of a contract shall be construed against the drafter -shall not be used to construe this License against a Contributor. - -10. Versions of the License ---------------------------- - -10.1. New Versions - -Mozilla Foundation is the license steward. Except as provided in Section -10.3, no one other than the license steward has the right to modify or -publish new versions of this License. Each version will be given a -distinguishing version number. - -10.2. Effect of New Versions - -You may distribute the Covered Software under the terms of the version -of the License under which You originally received the Covered Software, -or under the terms of any subsequent version published by the license -steward. - -10.3. Modified Versions - -If you create software not governed by this License, and you want to -create a new license for such software, you may create and use a -modified version of this License if you rename the license and remove -any references to the name of the license steward (except to note that -such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary -Licenses - -If You choose to distribute Source Code Form that is Incompatible With -Secondary Licenses under the terms of this version of the License, the -notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice -------------------------------------------- - - This Source Code Form is subject to the terms of the Mozilla Public - License, v. 2.0. If a copy of the MPL was not distributed with this - file, You can obtain one at http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular -file, then You may include the notice in a location (such as a LICENSE -file in a relevant directory) where a recipient would be likely to look -for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice ---------------------------------------------------------- - - This Source Code Form is "Incompatible With Secondary Licenses", as - defined by the Mozilla Public License, v. 2.0. 
diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/README.md b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/README.md deleted file mode 100644 index 6a2bb2ca3..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/README.md +++ /dev/null @@ -1,386 +0,0 @@ -# Go-MySQL-Driver - -A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) package - -![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin") - -**Latest stable Release:** [Version 1.2 (June 03, 2014)](https://github.com/go-sql-driver/mysql/releases) - -[![Build Status](https://travis-ci.org/go-sql-driver/mysql.png?branch=master)](https://travis-ci.org/go-sql-driver/mysql) - --------------------------------------- - * [Features](#features) - * [Requirements](#requirements) - * [Installation](#installation) - * [Usage](#usage) - * [DSN (Data Source Name)](#dsn-data-source-name) - * [Password](#password) - * [Protocol](#protocol) - * [Address](#address) - * [Parameters](#parameters) - * [Examples](#examples) - * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support) - * [time.Time support](#timetime-support) - * [Unicode support](#unicode-support) - * [Testing / Development](#testing--development) - * [License](#license) - --------------------------------------- - -## Features - * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance") - * Native Go implementation. No C-bindings, just pure Go - * Connections over TCP/IPv4, TCP/IPv6 or Unix domain sockets - * Automatic handling of broken connections - * Automatic Connection Pooling *(by database/sql package)* - * Supports queries larger than 16MB - * Full [`sql.RawBytes`](http://golang.org/pkg/database/sql/#RawBytes) support. - * Intelligent `LONG DATA` handling in prepared statements - * Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support - * Optional `time.Time` parsing - * Optional placeholder interpolation - -## Requirements - * Go 1.2 or higher - * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+) - --------------------------------------- - -## Installation -Simply install the package to your [$GOPATH](http://code.google.com/p/go-wiki/wiki/GOPATH "GOPATH") with the [go tool](http://golang.org/cmd/go/ "go command") from shell: ```bash -$ go get github.com/go-sql-driver/mysql -``` -Make sure [Git is installed](http://git-scm.com/downloads) on your machine and in your system's `PATH`. - -## Usage -_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver; you can then use the full [`database/sql`](http://golang.org/pkg/database/sql) API. - -Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`: ```go -import "database/sql" -import _ "github.com/go-sql-driver/mysql" - -db, err := sql.Open("mysql", "user:password@/dbname") -``` - -[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
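For a fuller picture, a complete program in the same vein (a sketch; the DSN, credentials and query are placeholders to adapt):

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql" // registers the "mysql" driver
)

func main() {
	// sql.Open only validates the DSN; no connection is established yet.
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Ping forces a round trip and verifies the DSN actually works.
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}

	var version string
	if err := db.QueryRow("SELECT VERSION()").Scan(&version); err != nil {
		log.Fatal(err)
	}
	fmt.Println("connected to MySQL", version)
}
```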
- - -### DSN (Data Source Name) - -The Data Source Name has a common format, as e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php) uses it, but without a type prefix (optional parts are marked by square brackets): -``` -[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN] -``` - -A DSN in its fullest form: -``` -username:password@protocol(address)/dbname?param=value -``` - -Except for the database name, all values are optional. So the minimal DSN is: -``` -/dbname -``` - -If you do not want to preselect a database, leave `dbname` empty: -``` -/ -``` -This has the same effect as an empty DSN string: -``` - -``` - -#### Password -Passwords can consist of any character. Escaping is **not** necessary. - -#### Protocol -See [net.Dial](http://golang.org/pkg/net/#Dial) for more information about which networks are available. -In general you should use a Unix domain socket if available and TCP otherwise for best performance. - -#### Address -For TCP and UDP networks, addresses have the form `host:port`. -If `host` is a literal IPv6 address, it must be enclosed in square brackets. -The functions [net.JoinHostPort](http://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](http://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form. - -For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`. - -#### Parameters -*Parameters are case-sensitive!* - -Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`. - -##### `allowAllFiles` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files. -[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html) - -##### `allowCleartextPasswords` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network. - -##### `allowOldPasswords` - -``` -Type: bool -Valid Values: true, false -Default: false -``` -`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords). - -##### `charset` - -``` -Type: string -Valid Values: <name> -Default: none -``` - -Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset fails. This enables, for example, support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`). - -Usage of the `charset` parameter is discouraged because it issues additional queries to the server. -Unless you need the fallback behavior, please use `collation` instead. - -##### `collation` - -``` -Type: string -Valid Values: <name> -Default: utf8_general_ci -``` - -Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail. - -A list of valid collations for a server is retrievable with `SHOW COLLATION`. - -##### `clientFoundRows` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed. - -##### `columnsWithAlias` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example: - -``` -SELECT u.id FROM users as u -``` - -will return `u.id` instead of just `id` if `columnsWithAlias=true`. - -##### `interpolateParams` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with the given parameters. This reduces the number of roundtrips: with `interpolateParams=false` the driver instead has to prepare a statement, execute it with the given parameters, and close the statement again. - -*This cannot be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
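For illustration, a sketch of what this saves in practice (the `users` table is hypothetical; `interpolateParams=true` is assumed to be set in the DSN):

```go
import "database/sql"

// insertUser assumes a DSN ending in ?interpolateParams=true.
func insertUser(db *sql.DB, name string, age int) error {
	// With interpolation enabled this is sent as one self-contained query;
	// with the default (false) the driver prepares, executes and closes a
	// server-side statement instead, costing two extra roundtrips.
	_, err := db.Exec("INSERT INTO users (name, age) VALUES (?, ?)", name, age)
	return err
}
```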
- -##### `loc` - -``` -Type: string -Valid Values: <escaped name> -Default: UTC -``` - -Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](http://golang.org/pkg/time/#LoadLocation) for details. - -Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter. - -Please keep in mind that param values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`. - - -##### `parseTime` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`. - - -##### `strict` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -`strict=true` enables the strict mode in which MySQL warnings are treated as errors. - -By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes. See the [examples](#examples) for a DSN example. - - -##### `timeout` - -``` -Type: decimal number -Default: OS default -``` - -*Driver* side connection timeout. The value must be a string of decimal numbers, each with optional fraction and a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. To set a server side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout). - - -##### `tls` - -``` -Type: bool / string -Valid Values: true, false, skip-verify, <name> -Default: false -``` - -`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](http://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig). - - -##### System Variables - -All other parameters are interpreted as system variables: - * `autocommit`: `"SET autocommit=<value>"` - * [`time_zone`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `"SET time_zone=<value>"` - * [`tx_isolation`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `"SET tx_isolation=<value>"` - * `<param>`: `"SET <param>=<value>"` - -*The values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!*
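To tie the parameter reference together, a sketch combining several of the parameters above in one DSN (credentials, host and database are placeholders):

```go
import (
	"database/sql"

	_ "github.com/go-sql-driver/mysql"
)

// openDB is a sketch; user, password, host and dbname are placeholders.
func openDB() (*sql.DB, error) {
	// parseTime=true scans DATE/DATETIME into time.Time, loc=Local applies
	// the system's location to those values, timeout=30s bounds the driver
	// side connect, and collation picks the connection collation up front.
	return sql.Open("mysql",
		"user:password@tcp(127.0.0.1:3306)/dbname?parseTime=true&loc=Local&timeout=30s&collation=utf8mb4_unicode_ci")
}
```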
- -#### Examples -``` -user@unix(/path/to/socket)/dbname -``` - -``` -root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local -``` - -``` -user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true -``` - -Use the [strict mode](#strict) but ignore notes: -``` -user:password@/dbname?strict=true&sql_notes=false -``` - -TCP via IPv6: -``` -user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci -``` - -TCP on a remote host, e.g. Amazon RDS: -``` -id:password@tcp(your-amazonaws-uri.com:3306)/dbname -``` - -Google Cloud SQL on App Engine: -``` -user@cloudsql(project-id:instance-name)/dbname -``` - -TCP using default port (3306) on localhost: -``` -user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped -``` - -Use the default protocol (tcp) and host (localhost:3306): -``` -user:password@/dbname -``` - -No Database preselected: -``` -user:password@/ -``` - -### `LOAD DATA LOCAL INFILE` support -For this feature you need direct access to the package. Therefore you must change the import path (no `_`): -```go -import "github.com/go-sql-driver/mysql" -``` - -Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the Whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)). - -To use an `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns an `io.Reader` or `io.ReadCloser`. The Reader is then available with the filepath `Reader::<name>`. - -See the [godoc of Go-MySQL-Driver](http://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details. - - -### `time.Time` support -The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte`, which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program. - -However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](http://golang.org/pkg/time/#Location) with the `loc` DSN parameter. - -**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
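For illustration, a sketch of scanning a `DATETIME` column with `parseTime=true` set in the DSN (the `users` table and `last_login` column are hypothetical):

```go
import (
	"database/sql"
	"time"
)

// lastLogin scans a DATETIME column directly into time.Time,
// which requires parseTime=true in the DSN.
func lastLogin(db *sql.DB, id int64) (time.Time, error) {
	var t time.Time
	err := db.QueryRow("SELECT last_login FROM users WHERE id = ?", id).Scan(&t)
	return t, err
}
```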
- -Alternatively you can use the [`NullTime`](http://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`. - - -### Unicode support -Since version 1.1 Go-MySQL-Driver automatically uses the collation `utf8_general_ci` by default. - -Other collations / charsets can be set using the [`collation`](#collation) DSN parameter. - -Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter is preferred for setting a collation / charset other than the default. - -See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support. - - -## Testing / Development -To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details. - -Go-MySQL-Driver is not feature-complete yet. Your help is greatly appreciated. -If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls). - -See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details. - --------------------------------------- - -## License -Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE) - -Mozilla summarizes the license scope as follows: -> MPL: The copyleft applies to any files containing MPLed code. - - -That means: - * You can **use** the **unchanged** source code both in private and commercially - * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0) - * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged** - -Please read the [MPL 2.0 FAQ](http://www.mozilla.org/MPL/2.0/FAQ.html) if you have further questions regarding the license. - -You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE) - -![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow") - diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/appengine.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/appengine.go deleted file mode 100644 index 565614eef..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/appengine.go +++ /dev/null @@ -1,19 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/.
- -// +build appengine - -package mysql - -import ( - "appengine/cloudsql" -) - -func init() { - RegisterDial("cloudsql", cloudsql.Dial) -} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/benchmark_test.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/benchmark_test.go deleted file mode 100644 index fb8a2f5f3..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/benchmark_test.go +++ /dev/null @@ -1,246 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "bytes" - "database/sql" - "database/sql/driver" - "math" - "strings" - "sync" - "sync/atomic" - "testing" - "time" -) - -type TB testing.B - -func (tb *TB) check(err error) { - if err != nil { - tb.Fatal(err) - } -} - -func (tb *TB) checkDB(db *sql.DB, err error) *sql.DB { - tb.check(err) - return db -} - -func (tb *TB) checkRows(rows *sql.Rows, err error) *sql.Rows { - tb.check(err) - return rows -} - -func (tb *TB) checkStmt(stmt *sql.Stmt, err error) *sql.Stmt { - tb.check(err) - return stmt -} - -func initDB(b *testing.B, queries ...string) *sql.DB { - tb := (*TB)(b) - db := tb.checkDB(sql.Open("mysql", dsn)) - for _, query := range queries { - if _, err := db.Exec(query); err != nil { - if w, ok := err.(MySQLWarnings); ok { - b.Logf("Warning on %q: %v", query, w) - } else { - b.Fatalf("Error on %q: %v", query, err) - } - } - } - return db -} - -const concurrencyLevel = 10 - -func BenchmarkQuery(b *testing.B) { - tb := (*TB)(b) - b.StopTimer() - b.ReportAllocs() - db := initDB(b, - "DROP TABLE IF EXISTS foo", - "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))", - `INSERT INTO foo VALUES (1, "one")`, - `INSERT INTO foo VALUES (2, "two")`, - ) - db.SetMaxIdleConns(concurrencyLevel) - defer db.Close() - - stmt := tb.checkStmt(db.Prepare("SELECT val FROM foo WHERE id=?")) - defer stmt.Close() - - remain := int64(b.N) - var wg sync.WaitGroup - wg.Add(concurrencyLevel) - defer wg.Wait() - b.StartTimer() - - for i := 0; i < concurrencyLevel; i++ { - go func() { - for { - if atomic.AddInt64(&remain, -1) < 0 { - wg.Done() - return - } - - var got string - tb.check(stmt.QueryRow(1).Scan(&got)) - if got != "one" { - b.Errorf("query = %q; want one", got) - wg.Done() - return - } - } - }() - } -} - -func BenchmarkExec(b *testing.B) { - tb := (*TB)(b) - b.StopTimer() - b.ReportAllocs() - db := tb.checkDB(sql.Open("mysql", dsn)) - db.SetMaxIdleConns(concurrencyLevel) - defer db.Close() - - stmt := tb.checkStmt(db.Prepare("DO 1")) - defer stmt.Close() - - remain := int64(b.N) - var wg sync.WaitGroup - wg.Add(concurrencyLevel) - defer wg.Wait() - b.StartTimer() - - for i := 0; i < concurrencyLevel; i++ { - go func() { - for { - if atomic.AddInt64(&remain, -1) < 0 { - wg.Done() - return - } - - if _, err := stmt.Exec(); err != nil { - b.Fatal(err.Error()) - } - } - }() - } -} - -// data, but no db writes -var roundtripSample []byte - -func initRoundtripBenchmarks() ([]byte, int, int) { - if roundtripSample == nil { - roundtripSample = []byte(strings.Repeat("0123456789abcdef", 1024*1024)) - } - return roundtripSample, 16, len(roundtripSample) -} - -func BenchmarkRoundtripTxt(b *testing.B) { - b.StopTimer() - sample, min, max := initRoundtripBenchmarks() - sampleString := 
string(sample) - b.ReportAllocs() - tb := (*TB)(b) - db := tb.checkDB(sql.Open("mysql", dsn)) - defer db.Close() - b.StartTimer() - var result string - for i := 0; i < b.N; i++ { - length := min + i - if length > max { - length = max - } - test := sampleString[0:length] - rows := tb.checkRows(db.Query(`SELECT "` + test + `"`)) - if !rows.Next() { - rows.Close() - b.Fatalf("crashed") - } - err := rows.Scan(&result) - if err != nil { - rows.Close() - b.Fatalf("crashed") - } - if result != test { - rows.Close() - b.Errorf("mismatch") - } - rows.Close() - } -} - -func BenchmarkRoundtripBin(b *testing.B) { - b.StopTimer() - sample, min, max := initRoundtripBenchmarks() - b.ReportAllocs() - tb := (*TB)(b) - db := tb.checkDB(sql.Open("mysql", dsn)) - defer db.Close() - stmt := tb.checkStmt(db.Prepare("SELECT ?")) - defer stmt.Close() - b.StartTimer() - var result sql.RawBytes - for i := 0; i < b.N; i++ { - length := min + i - if length > max { - length = max - } - test := sample[0:length] - rows := tb.checkRows(stmt.Query(test)) - if !rows.Next() { - rows.Close() - b.Fatalf("crashed") - } - err := rows.Scan(&result) - if err != nil { - rows.Close() - b.Fatalf("crashed") - } - if !bytes.Equal(result, test) { - rows.Close() - b.Errorf("mismatch") - } - rows.Close() - } -} - -func BenchmarkInterpolation(b *testing.B) { - mc := &mysqlConn{ - cfg: &config{ - interpolateParams: true, - loc: time.UTC, - }, - maxPacketAllowed: maxPacketSize, - maxWriteSize: maxPacketSize - 1, - buf: newBuffer(nil), - } - - args := []driver.Value{ - int64(42424242), - float64(math.Pi), - false, - time.Unix(1423411542, 807015000), - []byte("bytes containing special chars ' \" \a \x00"), - "string containing special chars ' \" \a \x00", - } - q := "SELECT ?, ?, ?, ?, ?, ?" - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := mc.interpolateParams(q, args) - if err != nil { - b.Fatal(err) - } - } -} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/buffer.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/buffer.go deleted file mode 100644 index 509ce89e4..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/buffer.go +++ /dev/null @@ -1,136 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import "io" - -const defaultBufSize = 4096 - -// A buffer which is used for both reading and writing. -// This is possible since communication on each connection is synchronous. -// In other words, we can't write and read simultaneously on the same connection. -// The buffer is similar to bufio.Reader / Writer but zero-copy-ish -// Also highly optimized for this particular use case. -type buffer struct { - buf []byte - rd io.Reader - idx int - length int -} - -func newBuffer(rd io.Reader) buffer { - var b [defaultBufSize]byte - return buffer{ - buf: b[:], - rd: rd, - } -} - -// fill reads into the buffer until at least _need_ bytes are in it -func (b *buffer) fill(need int) error { - n := b.length - - // move existing data to the beginning - if n > 0 && b.idx > 0 { - copy(b.buf[0:n], b.buf[b.idx:]) - } - - // grow buffer if necessary - // TODO: let the buffer shrink again at some point - // Maybe keep the org buf slice and swap back? 
- if need > len(b.buf) { - // Round up to the next multiple of the default size - newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize) - copy(newBuf, b.buf) - b.buf = newBuf - } - - b.idx = 0 - - for { - nn, err := b.rd.Read(b.buf[n:]) - n += nn - - switch err { - case nil: - if n < need { - continue - } - b.length = n - return nil - - case io.EOF: - if n >= need { - b.length = n - return nil - } - return io.ErrUnexpectedEOF - - default: - return err - } - } -} - -// returns next N bytes from buffer. -// The returned slice is only guaranteed to be valid until the next read -func (b *buffer) readNext(need int) ([]byte, error) { - if b.length < need { - // refill - if err := b.fill(need); err != nil { - return nil, err - } - } - - offset := b.idx - b.idx += need - b.length -= need - return b.buf[offset:b.idx], nil -} - -// returns a buffer with the requested size. -// If possible, a slice from the existing buffer is returned. -// Otherwise a bigger buffer is made. -// Only one buffer (total) can be used at a time. -func (b *buffer) takeBuffer(length int) []byte { - if b.length > 0 { - return nil - } - - // test (cheap) general case first - if length <= defaultBufSize || length <= cap(b.buf) { - return b.buf[:length] - } - - if length < maxPacketSize { - b.buf = make([]byte, length) - return b.buf - } - return make([]byte, length) -} - -// shortcut which can be used if the requested buffer is guaranteed to be -// smaller than defaultBufSize -// Only one buffer (total) can be used at a time. -func (b *buffer) takeSmallBuffer(length int) []byte { - if b.length == 0 { - return b.buf[:length] - } - return nil -} - -// takeCompleteBuffer returns the complete existing buffer. -// This can be used if the necessary buffer size is unknown. -// Only one buffer (total) can be used at a time. -func (b *buffer) takeCompleteBuffer() []byte { - if b.length == 0 { - return b.buf - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/collations.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/collations.go deleted file mode 100644 index 6c1d613d5..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/collations.go +++ /dev/null @@ -1,250 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -const defaultCollation byte = 33 // utf8_general_ci - -// A list of available collations mapped to the internal ID. 
-// To update this map use the following MySQL query: -// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS -var collations = map[string]byte{ - "big5_chinese_ci": 1, - "latin2_czech_cs": 2, - "dec8_swedish_ci": 3, - "cp850_general_ci": 4, - "latin1_german1_ci": 5, - "hp8_english_ci": 6, - "koi8r_general_ci": 7, - "latin1_swedish_ci": 8, - "latin2_general_ci": 9, - "swe7_swedish_ci": 10, - "ascii_general_ci": 11, - "ujis_japanese_ci": 12, - "sjis_japanese_ci": 13, - "cp1251_bulgarian_ci": 14, - "latin1_danish_ci": 15, - "hebrew_general_ci": 16, - "tis620_thai_ci": 18, - "euckr_korean_ci": 19, - "latin7_estonian_cs": 20, - "latin2_hungarian_ci": 21, - "koi8u_general_ci": 22, - "cp1251_ukrainian_ci": 23, - "gb2312_chinese_ci": 24, - "greek_general_ci": 25, - "cp1250_general_ci": 26, - "latin2_croatian_ci": 27, - "gbk_chinese_ci": 28, - "cp1257_lithuanian_ci": 29, - "latin5_turkish_ci": 30, - "latin1_german2_ci": 31, - "armscii8_general_ci": 32, - "utf8_general_ci": 33, - "cp1250_czech_cs": 34, - "ucs2_general_ci": 35, - "cp866_general_ci": 36, - "keybcs2_general_ci": 37, - "macce_general_ci": 38, - "macroman_general_ci": 39, - "cp852_general_ci": 40, - "latin7_general_ci": 41, - "latin7_general_cs": 42, - "macce_bin": 43, - "cp1250_croatian_ci": 44, - "utf8mb4_general_ci": 45, - "utf8mb4_bin": 46, - "latin1_bin": 47, - "latin1_general_ci": 48, - "latin1_general_cs": 49, - "cp1251_bin": 50, - "cp1251_general_ci": 51, - "cp1251_general_cs": 52, - "macroman_bin": 53, - "utf16_general_ci": 54, - "utf16_bin": 55, - "utf16le_general_ci": 56, - "cp1256_general_ci": 57, - "cp1257_bin": 58, - "cp1257_general_ci": 59, - "utf32_general_ci": 60, - "utf32_bin": 61, - "utf16le_bin": 62, - "binary": 63, - "armscii8_bin": 64, - "ascii_bin": 65, - "cp1250_bin": 66, - "cp1256_bin": 67, - "cp866_bin": 68, - "dec8_bin": 69, - "greek_bin": 70, - "hebrew_bin": 71, - "hp8_bin": 72, - "keybcs2_bin": 73, - "koi8r_bin": 74, - "koi8u_bin": 75, - "latin2_bin": 77, - "latin5_bin": 78, - "latin7_bin": 79, - "cp850_bin": 80, - "cp852_bin": 81, - "swe7_bin": 82, - "utf8_bin": 83, - "big5_bin": 84, - "euckr_bin": 85, - "gb2312_bin": 86, - "gbk_bin": 87, - "sjis_bin": 88, - "tis620_bin": 89, - "ucs2_bin": 90, - "ujis_bin": 91, - "geostd8_general_ci": 92, - "geostd8_bin": 93, - "latin1_spanish_ci": 94, - "cp932_japanese_ci": 95, - "cp932_bin": 96, - "eucjpms_japanese_ci": 97, - "eucjpms_bin": 98, - "cp1250_polish_ci": 99, - "utf16_unicode_ci": 101, - "utf16_icelandic_ci": 102, - "utf16_latvian_ci": 103, - "utf16_romanian_ci": 104, - "utf16_slovenian_ci": 105, - "utf16_polish_ci": 106, - "utf16_estonian_ci": 107, - "utf16_spanish_ci": 108, - "utf16_swedish_ci": 109, - "utf16_turkish_ci": 110, - "utf16_czech_ci": 111, - "utf16_danish_ci": 112, - "utf16_lithuanian_ci": 113, - "utf16_slovak_ci": 114, - "utf16_spanish2_ci": 115, - "utf16_roman_ci": 116, - "utf16_persian_ci": 117, - "utf16_esperanto_ci": 118, - "utf16_hungarian_ci": 119, - "utf16_sinhala_ci": 120, - "utf16_german2_ci": 121, - "utf16_croatian_ci": 122, - "utf16_unicode_520_ci": 123, - "utf16_vietnamese_ci": 124, - "ucs2_unicode_ci": 128, - "ucs2_icelandic_ci": 129, - "ucs2_latvian_ci": 130, - "ucs2_romanian_ci": 131, - "ucs2_slovenian_ci": 132, - "ucs2_polish_ci": 133, - "ucs2_estonian_ci": 134, - "ucs2_spanish_ci": 135, - "ucs2_swedish_ci": 136, - "ucs2_turkish_ci": 137, - "ucs2_czech_ci": 138, - "ucs2_danish_ci": 139, - "ucs2_lithuanian_ci": 140, - "ucs2_slovak_ci": 141, - "ucs2_spanish2_ci": 142, - "ucs2_roman_ci": 143, - "ucs2_persian_ci": 144, - 
"ucs2_esperanto_ci": 145, - "ucs2_hungarian_ci": 146, - "ucs2_sinhala_ci": 147, - "ucs2_german2_ci": 148, - "ucs2_croatian_ci": 149, - "ucs2_unicode_520_ci": 150, - "ucs2_vietnamese_ci": 151, - "ucs2_general_mysql500_ci": 159, - "utf32_unicode_ci": 160, - "utf32_icelandic_ci": 161, - "utf32_latvian_ci": 162, - "utf32_romanian_ci": 163, - "utf32_slovenian_ci": 164, - "utf32_polish_ci": 165, - "utf32_estonian_ci": 166, - "utf32_spanish_ci": 167, - "utf32_swedish_ci": 168, - "utf32_turkish_ci": 169, - "utf32_czech_ci": 170, - "utf32_danish_ci": 171, - "utf32_lithuanian_ci": 172, - "utf32_slovak_ci": 173, - "utf32_spanish2_ci": 174, - "utf32_roman_ci": 175, - "utf32_persian_ci": 176, - "utf32_esperanto_ci": 177, - "utf32_hungarian_ci": 178, - "utf32_sinhala_ci": 179, - "utf32_german2_ci": 180, - "utf32_croatian_ci": 181, - "utf32_unicode_520_ci": 182, - "utf32_vietnamese_ci": 183, - "utf8_unicode_ci": 192, - "utf8_icelandic_ci": 193, - "utf8_latvian_ci": 194, - "utf8_romanian_ci": 195, - "utf8_slovenian_ci": 196, - "utf8_polish_ci": 197, - "utf8_estonian_ci": 198, - "utf8_spanish_ci": 199, - "utf8_swedish_ci": 200, - "utf8_turkish_ci": 201, - "utf8_czech_ci": 202, - "utf8_danish_ci": 203, - "utf8_lithuanian_ci": 204, - "utf8_slovak_ci": 205, - "utf8_spanish2_ci": 206, - "utf8_roman_ci": 207, - "utf8_persian_ci": 208, - "utf8_esperanto_ci": 209, - "utf8_hungarian_ci": 210, - "utf8_sinhala_ci": 211, - "utf8_german2_ci": 212, - "utf8_croatian_ci": 213, - "utf8_unicode_520_ci": 214, - "utf8_vietnamese_ci": 215, - "utf8_general_mysql500_ci": 223, - "utf8mb4_unicode_ci": 224, - "utf8mb4_icelandic_ci": 225, - "utf8mb4_latvian_ci": 226, - "utf8mb4_romanian_ci": 227, - "utf8mb4_slovenian_ci": 228, - "utf8mb4_polish_ci": 229, - "utf8mb4_estonian_ci": 230, - "utf8mb4_spanish_ci": 231, - "utf8mb4_swedish_ci": 232, - "utf8mb4_turkish_ci": 233, - "utf8mb4_czech_ci": 234, - "utf8mb4_danish_ci": 235, - "utf8mb4_lithuanian_ci": 236, - "utf8mb4_slovak_ci": 237, - "utf8mb4_spanish2_ci": 238, - "utf8mb4_roman_ci": 239, - "utf8mb4_persian_ci": 240, - "utf8mb4_esperanto_ci": 241, - "utf8mb4_hungarian_ci": 242, - "utf8mb4_sinhala_ci": 243, - "utf8mb4_german2_ci": 244, - "utf8mb4_croatian_ci": 245, - "utf8mb4_unicode_520_ci": 246, - "utf8mb4_vietnamese_ci": 247, -} - -// A blacklist of collations which is unsafe to interpolate parameters. -// These multibyte encodings may contains 0x5c (`\`) in their trailing bytes. -var unsafeCollations = map[byte]bool{ - 1: true, // big5_chinese_ci - 13: true, // sjis_japanese_ci - 28: true, // gbk_chinese_ci - 84: true, // big5_bin - 86: true, // gb2312_bin - 87: true, // gbk_bin - 88: true, // sjis_bin - 95: true, // cp932_japanese_ci - 96: true, // cp932_bin -} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/connection.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/connection.go deleted file mode 100644 index caaae013f..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/connection.go +++ /dev/null @@ -1,403 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. 
- -package mysql - -import ( - "crypto/tls" - "database/sql/driver" - "errors" - "net" - "strconv" - "strings" - "time" -) - -type mysqlConn struct { - buf buffer - netConn net.Conn - affectedRows uint64 - insertId uint64 - cfg *config - maxPacketAllowed int - maxWriteSize int - flags clientFlag - status statusFlag - sequence uint8 - parseTime bool - strict bool -} - -type config struct { - user string - passwd string - net string - addr string - dbname string - params map[string]string - loc *time.Location - tls *tls.Config - timeout time.Duration - collation uint8 - allowAllFiles bool - allowOldPasswords bool - allowCleartextPasswords bool - clientFoundRows bool - columnsWithAlias bool - interpolateParams bool -} - -// Handles parameters set in DSN after the connection is established -func (mc *mysqlConn) handleParams() (err error) { - for param, val := range mc.cfg.params { - switch param { - // Charset - case "charset": - charsets := strings.Split(val, ",") - for i := range charsets { - // ignore errors here - a charset may not exist - err = mc.exec("SET NAMES " + charsets[i]) - if err == nil { - break - } - } - if err != nil { - return - } - - // time.Time parsing - case "parseTime": - var isBool bool - mc.parseTime, isBool = readBool(val) - if !isBool { - return errors.New("Invalid Bool value: " + val) - } - - // Strict mode - case "strict": - var isBool bool - mc.strict, isBool = readBool(val) - if !isBool { - return errors.New("Invalid Bool value: " + val) - } - - // Compression - case "compress": - err = errors.New("Compression not implemented yet") - return - - // System Vars - default: - err = mc.exec("SET " + param + "=" + val + "") - if err != nil { - return - } - } - } - - return -} - -func (mc *mysqlConn) Begin() (driver.Tx, error) { - if mc.netConn == nil { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - err := mc.exec("START TRANSACTION") - if err == nil { - return &mysqlTx{mc}, err - } - - return nil, err -} - -func (mc *mysqlConn) Close() (err error) { - // Makes Close idempotent - if mc.netConn != nil { - err = mc.writeCommandPacket(comQuit) - if err == nil { - err = mc.netConn.Close() - } else { - mc.netConn.Close() - } - mc.netConn = nil - } - - mc.cfg = nil - mc.buf.rd = nil - - return -} - -func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) { - if mc.netConn == nil { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - // Send command - err := mc.writeCommandPacketStr(comStmtPrepare, query) - if err != nil { - return nil, err - } - - stmt := &mysqlStmt{ - mc: mc, - } - - // Read Result - columnCount, err := stmt.readPrepareResultPacket() - if err == nil { - if stmt.paramCount > 0 { - if err = mc.readUntilEOF(); err != nil { - return nil, err - } - } - - if columnCount > 0 { - err = mc.readUntilEOF() - } - } - - return stmt, err -} - -func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) { - buf := mc.buf.takeCompleteBuffer() - if buf == nil { - // can not take the buffer. Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return "", driver.ErrBadConn - } - buf = buf[:0] - argPos := 0 - - for i := 0; i < len(query); i++ { - q := strings.IndexByte(query[i:], '?') - if q == -1 { - buf = append(buf, query[i:]...) - break - } - buf = append(buf, query[i:i+q]...) - i += q - - arg := args[argPos] - argPos++ - - if arg == nil { - buf = append(buf, "NULL"...) 
- continue - } - - switch v := arg.(type) { - case int64: - buf = strconv.AppendInt(buf, v, 10) - case float64: - buf = strconv.AppendFloat(buf, v, 'g', -1, 64) - case bool: - if v { - buf = append(buf, '1') - } else { - buf = append(buf, '0') - } - case time.Time: - if v.IsZero() { - buf = append(buf, "'0000-00-00'"...) - } else { - v := v.In(mc.cfg.loc) - v = v.Add(time.Nanosecond * 500) // To round under microsecond - year := v.Year() - year100 := year / 100 - year1 := year % 100 - month := v.Month() - day := v.Day() - hour := v.Hour() - minute := v.Minute() - second := v.Second() - micro := v.Nanosecond() / 1000 - - buf = append(buf, []byte{ - '\'', - digits10[year100], digits01[year100], - digits10[year1], digits01[year1], - '-', - digits10[month], digits01[month], - '-', - digits10[day], digits01[day], - ' ', - digits10[hour], digits01[hour], - ':', - digits10[minute], digits01[minute], - ':', - digits10[second], digits01[second], - }...) - - if micro != 0 { - micro10000 := micro / 10000 - micro100 := micro / 100 % 100 - micro1 := micro % 100 - buf = append(buf, []byte{ - '.', - digits10[micro10000], digits01[micro10000], - digits10[micro100], digits01[micro100], - digits10[micro1], digits01[micro1], - }...) - } - buf = append(buf, '\'') - } - case []byte: - if v == nil { - buf = append(buf, "NULL"...) - } else { - buf = append(buf, '\'') - if mc.status&statusNoBackslashEscapes == 0 { - buf = escapeBytesBackslash(buf, v) - } else { - buf = escapeBytesQuotes(buf, v) - } - buf = append(buf, '\'') - } - case string: - buf = append(buf, '\'') - if mc.status&statusNoBackslashEscapes == 0 { - buf = escapeStringBackslash(buf, v) - } else { - buf = escapeStringQuotes(buf, v) - } - buf = append(buf, '\'') - default: - return "", driver.ErrSkip - } - - if len(buf)+4 > mc.maxPacketAllowed { - return "", driver.ErrSkip - } - } - if argPos != len(args) { - return "", driver.ErrSkip - } - return string(buf), nil -} - -func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) { - if mc.netConn == nil { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - if len(args) != 0 { - if !mc.cfg.interpolateParams { - return nil, driver.ErrSkip - } - // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement - prepared, err := mc.interpolateParams(query, args) - if err != nil { - return nil, err - } - query = prepared - args = nil - } - mc.affectedRows = 0 - mc.insertId = 0 - - err := mc.exec(query) - if err == nil { - return &mysqlResult{ - affectedRows: int64(mc.affectedRows), - insertId: int64(mc.insertId), - }, err - } - return nil, err -} - -// Internal function to execute commands -func (mc *mysqlConn) exec(query string) error { - // Send command - err := mc.writeCommandPacketStr(comQuery, query) - if err != nil { - return err - } - - // Read Result - resLen, err := mc.readResultSetHeaderPacket() - if err == nil && resLen > 0 { - if err = mc.readUntilEOF(); err != nil { - return err - } - - err = mc.readUntilEOF() - } - - return err -} - -func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) { - if mc.netConn == nil { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - if len(args) != 0 { - if !mc.cfg.interpolateParams { - return nil, driver.ErrSkip - } - // try client-side prepare to reduce roundtrip - prepared, err := mc.interpolateParams(query, args) - if err != nil { - return nil, err - } - query = prepared - args = nil - } - // Send command - err := 
mc.writeCommandPacketStr(comQuery, query) - if err == nil { - // Read Result - var resLen int - resLen, err = mc.readResultSetHeaderPacket() - if err == nil { - rows := new(textRows) - rows.mc = mc - - if resLen == 0 { - // no columns, no more data - return emptyRows{}, nil - } - // Columns - rows.columns, err = mc.readColumns(resLen) - return rows, err - } - } - return nil, err -} - -// Gets the value of the given MySQL System Variable -// The returned byte slice is only valid until the next read -func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) { - // Send command - if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil { - return nil, err - } - - // Read Result - resLen, err := mc.readResultSetHeaderPacket() - if err == nil { - rows := new(textRows) - rows.mc = mc - - if resLen > 0 { - // Columns - if err := mc.readUntilEOF(); err != nil { - return nil, err - } - } - - dest := make([]driver.Value, resLen) - if err = rows.readRow(dest); err == nil { - return dest[0].([]byte), mc.readUntilEOF() - } - } - return nil, err -} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/const.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/const.go deleted file mode 100644 index dddc12908..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/const.go +++ /dev/null @@ -1,162 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -const ( - minProtocolVersion byte = 10 - maxPacketSize = 1<<24 - 1 - timeFormat = "2006-01-02 15:04:05.999999" -) - -// MySQL constants documentation: -// http://dev.mysql.com/doc/internals/en/client-server-protocol.html - -const ( - iOK byte = 0x00 - iLocalInFile byte = 0xfb - iEOF byte = 0xfe - iERR byte = 0xff -) - -// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags -type clientFlag uint32 - -const ( - clientLongPassword clientFlag = 1 << iota - clientFoundRows - clientLongFlag - clientConnectWithDB - clientNoSchema - clientCompress - clientODBC - clientLocalFiles - clientIgnoreSpace - clientProtocol41 - clientInteractive - clientSSL - clientIgnoreSIGPIPE - clientTransactions - clientReserved - clientSecureConn - clientMultiStatements - clientMultiResults - clientPSMultiResults - clientPluginAuth - clientConnectAttrs - clientPluginAuthLenEncClientData - clientCanHandleExpiredPasswords - clientSessionTrack - clientDeprecateEOF -) - -const ( - comQuit byte = iota + 1 - comInitDB - comQuery - comFieldList - comCreateDB - comDropDB - comRefresh - comShutdown - comStatistics - comProcessInfo - comConnect - comProcessKill - comDebug - comPing - comTime - comDelayedInsert - comChangeUser - comBinlogDump - comTableDump - comConnectOut - comRegisterSlave - comStmtPrepare - comStmtExecute - comStmtSendLongData - comStmtClose - comStmtReset - comSetOption - comStmtFetch -) - -// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType -const ( - fieldTypeDecimal byte = iota - fieldTypeTiny - fieldTypeShort - fieldTypeLong - fieldTypeFloat - fieldTypeDouble - fieldTypeNULL - fieldTypeTimestamp - fieldTypeLongLong - fieldTypeInt24 - fieldTypeDate - fieldTypeTime - fieldTypeDateTime - fieldTypeYear - fieldTypeNewDate - 
fieldTypeVarChar - fieldTypeBit -) -const ( - fieldTypeNewDecimal byte = iota + 0xf6 - fieldTypeEnum - fieldTypeSet - fieldTypeTinyBLOB - fieldTypeMediumBLOB - fieldTypeLongBLOB - fieldTypeBLOB - fieldTypeVarString - fieldTypeString - fieldTypeGeometry -) - -type fieldFlag uint16 - -const ( - flagNotNULL fieldFlag = 1 << iota - flagPriKey - flagUniqueKey - flagMultipleKey - flagBLOB - flagUnsigned - flagZeroFill - flagBinary - flagEnum - flagAutoIncrement - flagTimestamp - flagSet - flagUnknown1 - flagUnknown2 - flagUnknown3 - flagUnknown4 -) - -// http://dev.mysql.com/doc/internals/en/status-flags.html -type statusFlag uint16 - -const ( - statusInTrans statusFlag = 1 << iota - statusInAutocommit - statusReserved // Not in documentation - statusMoreResultsExists - statusNoGoodIndexUsed - statusNoIndexUsed - statusCursorExists - statusLastRowSent - statusDbDropped - statusNoBackslashEscapes - statusMetadataChanged - statusQueryWasSlow - statusPsOutParams - statusInTransReadonly - statusSessionStateChanged -) diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver.go deleted file mode 100644 index d310624ad..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// The driver should be used via the database/sql package: -// -// import "database/sql" -// import _ "github.com/go-sql-driver/mysql" -// -// db, err := sql.Open("mysql", "user:password@/dbname") -// -// See https://github.com/go-sql-driver/mysql#usage for details -package mysql - -import ( - "database/sql" - "database/sql/driver" - "net" -) - -// This struct is exported to make the driver directly accessible. -// In general the driver is used via the database/sql package. -type MySQLDriver struct{} - -// DialFunc is a function which can be used to establish the network connection. -// Custom dial functions must be registered with RegisterDial -type DialFunc func(addr string) (net.Conn, error) - -var dials map[string]DialFunc - -// RegisterDial registers a custom dial function. It can then be used by the -// network address mynet(addr), where mynet is the registered new network. -// addr is passed as a parameter to the dial function. -func RegisterDial(net string, dial DialFunc) { - if dials == nil { - dials = make(map[string]DialFunc) - } - dials[net] = dial -} - -// Open new Connection. 
-// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how -// the DSN string is formatted -func (d MySQLDriver) Open(dsn string) (driver.Conn, error) { - var err error - - // New mysqlConn - mc := &mysqlConn{ - maxPacketAllowed: maxPacketSize, - maxWriteSize: maxPacketSize - 1, - } - mc.cfg, err = parseDSN(dsn) - if err != nil { - return nil, err - } - - // Connect to Server - if dial, ok := dials[mc.cfg.net]; ok { - mc.netConn, err = dial(mc.cfg.addr) - } else { - nd := net.Dialer{Timeout: mc.cfg.timeout} - mc.netConn, err = nd.Dial(mc.cfg.net, mc.cfg.addr) - } - if err != nil { - return nil, err - } - - // Enable TCP Keepalives on TCP connections - if tc, ok := mc.netConn.(*net.TCPConn); ok { - if err := tc.SetKeepAlive(true); err != nil { - // Don't send COM_QUIT before handshake. - mc.netConn.Close() - mc.netConn = nil - return nil, err - } - } - - mc.buf = newBuffer(mc.netConn) - - // Reading Handshake Initialization Packet - cipher, err := mc.readInitPacket() - if err != nil { - mc.Close() - return nil, err - } - - // Send Client Authentication Packet - if err = mc.writeAuthPacket(cipher); err != nil { - mc.Close() - return nil, err - } - - // Read Result Packet - err = mc.readResultOK() - if err != nil { - // Retry with old authentication method, if allowed - if mc.cfg != nil && mc.cfg.allowOldPasswords && err == ErrOldPassword { - if err = mc.writeOldAuthPacket(cipher); err != nil { - mc.Close() - return nil, err - } - if err = mc.readResultOK(); err != nil { - mc.Close() - return nil, err - } - } else if mc.cfg != nil && mc.cfg.allowCleartextPasswords && err == ErrCleartextPassword { - if err = mc.writeClearAuthPacket(); err != nil { - mc.Close() - return nil, err - } - if err = mc.readResultOK(); err != nil { - mc.Close() - return nil, err - } - } else { - mc.Close() - return nil, err - } - - } - - // Get max allowed packet size - maxap, err := mc.getSystemVar("max_allowed_packet") - if err != nil { - mc.Close() - return nil, err - } - mc.maxPacketAllowed = stringToInt(maxap) - 1 - if mc.maxPacketAllowed < maxPacketSize { - mc.maxWriteSize = mc.maxPacketAllowed - } - - // Handle DSN Params - err = mc.handleParams() - if err != nil { - mc.Close() - return nil, err - } - - return mc, nil -} - -func init() { - sql.Register("mysql", &MySQLDriver{}) -} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver_test.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver_test.go deleted file mode 100644 index f9da416ec..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver_test.go +++ /dev/null @@ -1,1681 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/.
- -package mysql - -import ( - "crypto/tls" - "database/sql" - "database/sql/driver" - "fmt" - "io" - "io/ioutil" - "net" - "net/url" - "os" - "strings" - "sync" - "sync/atomic" - "testing" - "time" -) - -var ( - user string - pass string - prot string - addr string - dbname string - dsn string - netAddr string - available bool -) - -var ( - tDate = time.Date(2012, 6, 14, 0, 0, 0, 0, time.UTC) - sDate = "2012-06-14" - tDateTime = time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC) - sDateTime = "2011-11-20 21:27:37" - tDate0 = time.Time{} - sDate0 = "0000-00-00" - sDateTime0 = "0000-00-00 00:00:00" -) - -// See https://github.com/go-sql-driver/mysql/wiki/Testing -func init() { - // get environment variables - env := func(key, defaultValue string) string { - if value := os.Getenv(key); value != "" { - return value - } - return defaultValue - } - user = env("MYSQL_TEST_USER", "root") - pass = env("MYSQL_TEST_PASS", "") - prot = env("MYSQL_TEST_PROT", "tcp") - addr = env("MYSQL_TEST_ADDR", "localhost:3306") - dbname = env("MYSQL_TEST_DBNAME", "gotest") - netAddr = fmt.Sprintf("%s(%s)", prot, addr) - dsn = fmt.Sprintf("%s:%s@%s/%s?timeout=30s&strict=true", user, pass, netAddr, dbname) - c, err := net.Dial(prot, addr) - if err == nil { - available = true - c.Close() - } -} - -type DBTest struct { - *testing.T - db *sql.DB -} - -func runTests(t *testing.T, dsn string, tests ...func(dbt *DBTest)) { - if !available { - t.Skipf("MySQL-Server not running on %s", netAddr) - } - - db, err := sql.Open("mysql", dsn) - if err != nil { - t.Fatalf("Error connecting: %s", err.Error()) - } - defer db.Close() - - db.Exec("DROP TABLE IF EXISTS test") - - dsn2 := dsn + "&interpolateParams=true" - var db2 *sql.DB - if _, err := parseDSN(dsn2); err != errInvalidDSNUnsafeCollation { - db2, err = sql.Open("mysql", dsn2) - if err != nil { - t.Fatalf("Error connecting: %s", err.Error()) - } - defer db2.Close() - } - - dbt := &DBTest{t, db} - dbt2 := &DBTest{t, db2} - for _, test := range tests { - test(dbt) - dbt.db.Exec("DROP TABLE IF EXISTS test") - if db2 != nil { - test(dbt2) - dbt2.db.Exec("DROP TABLE IF EXISTS test") - } - } -} - -func (dbt *DBTest) fail(method, query string, err error) { - if len(query) > 300 { - query = "[query too large to print]" - } - dbt.Fatalf("Error on %s %s: %s", method, query, err.Error()) -} - -func (dbt *DBTest) mustExec(query string, args ...interface{}) (res sql.Result) { - res, err := dbt.db.Exec(query, args...) - if err != nil { - dbt.fail("Exec", query, err) - } - return res -} - -func (dbt *DBTest) mustQuery(query string, args ...interface{}) (rows *sql.Rows) { - rows, err := dbt.db.Query(query, args...) 
- if err != nil { - dbt.fail("Query", query, err) - } - return rows -} - -func TestEmptyQuery(t *testing.T) { - runTests(t, dsn, func(dbt *DBTest) { - // just a comment, no query - rows := dbt.mustQuery("--") - // will hang before #255 - if rows.Next() { - dbt.Errorf("Next on rows must be false") - } - }) -} - -func TestCRUD(t *testing.T) { - runTests(t, dsn, func(dbt *DBTest) { - // Create Table - dbt.mustExec("CREATE TABLE test (value BOOL)") - - // Test for unexpected data - var out bool - rows := dbt.mustQuery("SELECT * FROM test") - if rows.Next() { - dbt.Error("unexpected data in empty table") - } - - // Create Data - res := dbt.mustExec("INSERT INTO test VALUES (1)") - count, err := res.RowsAffected() - if err != nil { - dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) - } - if count != 1 { - dbt.Fatalf("Expected 1 affected row, got %d", count) - } - - id, err := res.LastInsertId() - if err != nil { - dbt.Fatalf("res.LastInsertId() returned error: %s", err.Error()) - } - if id != 0 { - dbt.Fatalf("Expected InsertID 0, got %d", id) - } - - // Read - rows = dbt.mustQuery("SELECT value FROM test") - if rows.Next() { - rows.Scan(&out) - if true != out { - dbt.Errorf("true != %t", out) - } - - if rows.Next() { - dbt.Error("unexpected data") - } - } else { - dbt.Error("no data") - } - - // Update - res = dbt.mustExec("UPDATE test SET value = ? WHERE value = ?", false, true) - count, err = res.RowsAffected() - if err != nil { - dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) - } - if count != 1 { - dbt.Fatalf("Expected 1 affected row, got %d", count) - } - - // Check Update - rows = dbt.mustQuery("SELECT value FROM test") - if rows.Next() { - rows.Scan(&out) - if false != out { - dbt.Errorf("false != %t", out) - } - - if rows.Next() { - dbt.Error("unexpected data") - } - } else { - dbt.Error("no data") - } - - // Delete - res = dbt.mustExec("DELETE FROM test WHERE value = ?", false) - count, err = res.RowsAffected() - if err != nil { - dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) - } - if count != 1 { - dbt.Fatalf("Expected 1 affected row, got %d", count) - } - - // Check for unexpected rows - res = dbt.mustExec("DELETE FROM test") - count, err = res.RowsAffected() - if err != nil { - dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) - } - if count != 0 { - dbt.Fatalf("Expected 0 affected row, got %d", count) - } - }) -} - -func TestInt(t *testing.T) { - runTests(t, dsn, func(dbt *DBTest) { - types := [5]string{"TINYINT", "SMALLINT", "MEDIUMINT", "INT", "BIGINT"} - in := int64(42) - var out int64 - var rows *sql.Rows - - // SIGNED - for _, v := range types { - dbt.mustExec("CREATE TABLE test (value " + v + ")") - - dbt.mustExec("INSERT INTO test VALUES (?)", in) - - rows = dbt.mustQuery("SELECT value FROM test") - if rows.Next() { - rows.Scan(&out) - if in != out { - dbt.Errorf("%s: %d != %d", v, in, out) - } - } else { - dbt.Errorf("%s: no data", v) - } - - dbt.mustExec("DROP TABLE IF EXISTS test") - } - - // UNSIGNED ZEROFILL - for _, v := range types { - dbt.mustExec("CREATE TABLE test (value " + v + " ZEROFILL)") - - dbt.mustExec("INSERT INTO test VALUES (?)", in) - - rows = dbt.mustQuery("SELECT value FROM test") - if rows.Next() { - rows.Scan(&out) - if in != out { - dbt.Errorf("%s ZEROFILL: %d != %d", v, in, out) - } - } else { - dbt.Errorf("%s ZEROFILL: no data", v) - } - - dbt.mustExec("DROP TABLE IF EXISTS test") - } - }) -} - -func TestFloat(t *testing.T) { - runTests(t, dsn, func(dbt *DBTest) { - types := 
[2]string{"FLOAT", "DOUBLE"} - in := float32(42.23) - var out float32 - var rows *sql.Rows - for _, v := range types { - dbt.mustExec("CREATE TABLE test (value " + v + ")") - dbt.mustExec("INSERT INTO test VALUES (?)", in) - rows = dbt.mustQuery("SELECT value FROM test") - if rows.Next() { - rows.Scan(&out) - if in != out { - dbt.Errorf("%s: %g != %g", v, in, out) - } - } else { - dbt.Errorf("%s: no data", v) - } - dbt.mustExec("DROP TABLE IF EXISTS test") - } - }) -} - -func TestString(t *testing.T) { - runTests(t, dsn, func(dbt *DBTest) { - types := [6]string{"CHAR(255)", "VARCHAR(255)", "TINYTEXT", "TEXT", "MEDIUMTEXT", "LONGTEXT"} - in := "κόσμε üöäßñóùéàâÿœ'îë Árvíztűrő いろはにほへとちりぬるを イロハニホヘト דג סקרן чащах น่าฟังเอย" - var out string - var rows *sql.Rows - - for _, v := range types { - dbt.mustExec("CREATE TABLE test (value " + v + ") CHARACTER SET utf8") - - dbt.mustExec("INSERT INTO test VALUES (?)", in) - - rows = dbt.mustQuery("SELECT value FROM test") - if rows.Next() { - rows.Scan(&out) - if in != out { - dbt.Errorf("%s: %s != %s", v, in, out) - } - } else { - dbt.Errorf("%s: no data", v) - } - - dbt.mustExec("DROP TABLE IF EXISTS test") - } - - // BLOB - dbt.mustExec("CREATE TABLE test (id int, value BLOB) CHARACTER SET utf8") - - id := 2 - in = "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " + - "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " + - "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " + - "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. " + - "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " + - "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " + - "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " + - "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet." - dbt.mustExec("INSERT INTO test VALUES (?, ?)", id, in) - - err := dbt.db.QueryRow("SELECT value FROM test WHERE id = ?", id).Scan(&out) - if err != nil { - dbt.Fatalf("Error on BLOB-Query: %s", err.Error()) - } else if out != in { - dbt.Errorf("BLOB: %s != %s", in, out) - } - }) -} - -type timeTests struct { - dbtype string - tlayout string - tests []timeTest -} - -type timeTest struct { - s string // leading "!": do not use t as value in queries - t time.Time -} - -type timeMode byte - -func (t timeMode) String() string { - switch t { - case binaryString: - return "binary:string" - case binaryTime: - return "binary:time.Time" - case textString: - return "text:string" - } - panic("unsupported timeMode") -} - -func (t timeMode) Binary() bool { - switch t { - case binaryString, binaryTime: - return true - } - return false -} - -const ( - binaryString timeMode = iota - binaryTime - textString -) - -func (t timeTest) genQuery(dbtype string, mode timeMode) string { - var inner string - if mode.Binary() { - inner = "?" 
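// To make the timeMode plumbing concrete: for dbtype "DATETIME" the two
// query shapes genQuery can return look like this (a sketch; the textString
// form is filled in later via fmt.Sprintf):
//
//	binaryString / binaryTime:  SELECT cast(? as DATETIME)   -- value sent as placeholder argument
//	textString:                 SELECT cast("%s" as DATETIME) -- value spliced into the statement text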
- } else { - inner = `"%s"` - } - return `SELECT cast(` + inner + ` as ` + dbtype + `)` -} - -func (t timeTest) run(dbt *DBTest, dbtype, tlayout string, mode timeMode) { - var rows *sql.Rows - query := t.genQuery(dbtype, mode) - switch mode { - case binaryString: - rows = dbt.mustQuery(query, t.s) - case binaryTime: - rows = dbt.mustQuery(query, t.t) - case textString: - query = fmt.Sprintf(query, t.s) - rows = dbt.mustQuery(query) - default: - panic("unsupported mode") - } - defer rows.Close() - var err error - if !rows.Next() { - err = rows.Err() - if err == nil { - err = fmt.Errorf("no data") - } - dbt.Errorf("%s [%s]: %s", dbtype, mode, err) - return - } - var dst interface{} - err = rows.Scan(&dst) - if err != nil { - dbt.Errorf("%s [%s]: %s", dbtype, mode, err) - return - } - switch val := dst.(type) { - case []uint8: - str := string(val) - if str == t.s { - return - } - if mode.Binary() && dbtype == "DATETIME" && len(str) == 26 && str[:19] == t.s { - // a fix mainly for TravisCI: - // accept full microsecond resolution in result for DATETIME columns - // where the binary protocol was used - return - } - dbt.Errorf("%s [%s] to string: expected %q, got %q", - dbtype, mode, - t.s, str, - ) - case time.Time: - if val == t.t { - return - } - dbt.Errorf("%s [%s] to string: expected %q, got %q", - dbtype, mode, - t.s, val.Format(tlayout), - ) - default: - fmt.Printf("%#v\n", []interface{}{dbtype, tlayout, mode, t.s, t.t}) - dbt.Errorf("%s [%s]: unhandled type %T (is '%v')", - dbtype, mode, - val, val, - ) - } -} - -func TestDateTime(t *testing.T) { - afterTime := func(t time.Time, d string) time.Time { - dur, err := time.ParseDuration(d) - if err != nil { - panic(err) - } - return t.Add(dur) - } - // NOTE: MySQL rounds DATETIME(x) up - but that's not included in the tests - format := "2006-01-02 15:04:05.999999" - t0 := time.Time{} - tstr0 := "0000-00-00 00:00:00.000000" - testcases := []timeTests{ - {"DATE", format[:10], []timeTest{ - {t: time.Date(2011, 11, 20, 0, 0, 0, 0, time.UTC)}, - {t: t0, s: tstr0[:10]}, - }}, - {"DATETIME", format[:19], []timeTest{ - {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)}, - {t: t0, s: tstr0[:19]}, - }}, - {"DATETIME(0)", format[:21], []timeTest{ - {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)}, - {t: t0, s: tstr0[:19]}, - }}, - {"DATETIME(1)", format[:21], []timeTest{ - {t: time.Date(2011, 11, 20, 21, 27, 37, 100000000, time.UTC)}, - {t: t0, s: tstr0[:21]}, - }}, - {"DATETIME(6)", format, []timeTest{ - {t: time.Date(2011, 11, 20, 21, 27, 37, 123456000, time.UTC)}, - {t: t0, s: tstr0}, - }}, - {"TIME", format[11:19], []timeTest{ - {t: afterTime(t0, "12345s")}, - {s: "!-12:34:56"}, - {s: "!-838:59:59"}, - {s: "!838:59:59"}, - {t: t0, s: tstr0[11:19]}, - }}, - {"TIME(0)", format[11:19], []timeTest{ - {t: afterTime(t0, "12345s")}, - {s: "!-12:34:56"}, - {s: "!-838:59:59"}, - {s: "!838:59:59"}, - {t: t0, s: tstr0[11:19]}, - }}, - {"TIME(1)", format[11:21], []timeTest{ - {t: afterTime(t0, "12345600ms")}, - {s: "!-12:34:56.7"}, - {s: "!-838:59:58.9"}, - {s: "!838:59:58.9"}, - {t: t0, s: tstr0[11:21]}, - }}, - {"TIME(6)", format[11:], []timeTest{ - {t: afterTime(t0, "1234567890123000ns")}, - {s: "!-12:34:56.789012"}, - {s: "!-838:59:58.999999"}, - {s: "!838:59:58.999999"}, - {t: t0, s: tstr0[11:]}, - }}, - } - dsns := []string{ - dsn + "&parseTime=true", - dsn + "&parseTime=false", - } - for _, testdsn := range dsns { - runTests(t, testdsn, func(dbt *DBTest) { - microsecsSupported := false - zeroDateSupported := false - var rows *sql.Rows - var err 
error - rows, err = dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`) - if err == nil { - rows.Scan(&microsecsSupported) - rows.Close() - } - rows, err = dbt.db.Query(`SELECT cast("0000-00-00" as DATE) = "0000-00-00"`) - if err == nil { - rows.Scan(&zeroDateSupported) - rows.Close() - } - for _, setups := range testcases { - if t := setups.dbtype; !microsecsSupported && t[len(t)-1:] == ")" { - // skip fractional second tests if unsupported by server - continue - } - for _, setup := range setups.tests { - allowBinTime := true - if setup.s == "" { - // fill time string wherever Go can reliably produce it - setup.s = setup.t.Format(setups.tlayout) - } else if setup.s[0] == '!' { - // skip tests using setup.t as source in queries - allowBinTime = false - // fix setup.s - remove the "!" - setup.s = setup.s[1:] - } - if !zeroDateSupported && setup.s == tstr0[:len(setup.s)] { - // skip disallowed 0000-00-00 date - continue - } - setup.run(dbt, setups.dbtype, setups.tlayout, textString) - setup.run(dbt, setups.dbtype, setups.tlayout, binaryString) - if allowBinTime { - setup.run(dbt, setups.dbtype, setups.tlayout, binaryTime) - } - } - } - }) - } -} - -func TestTimestampMicros(t *testing.T) { - format := "2006-01-02 15:04:05.999999" - f0 := format[:19] - f1 := format[:21] - f6 := format[:26] - runTests(t, dsn, func(dbt *DBTest) { - // check if microseconds are supported. - // Do not use timestamp(x) for that check - before 5.5.6, x would mean display width - // and not precision. - // See last paragraph at http://dev.mysql.com/doc/refman/5.6/en/fractional-seconds.html - microsecsSupported := false - if rows, err := dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`); err == nil { - rows.Scan(&microsecsSupported) - rows.Close() - } - if !microsecsSupported { - // skip test - return - } - _, err := dbt.db.Exec(` - CREATE TABLE test ( - value0 TIMESTAMP NOT NULL DEFAULT '` + f0 + `', - value1 TIMESTAMP(1) NOT NULL DEFAULT '` + f1 + `', - value6 TIMESTAMP(6) NOT NULL DEFAULT '` + f6 + `' - )`, - ) - if err != nil { - dbt.Error(err) - } - defer dbt.mustExec("DROP TABLE IF EXISTS test") - dbt.mustExec("INSERT INTO test SET value0=?, value1=?, value6=?", f0, f1, f6) - var res0, res1, res6 string - rows := dbt.mustQuery("SELECT * FROM test") - if !rows.Next() { - dbt.Errorf("test contained no selectable values") - } - err = rows.Scan(&res0, &res1, &res6) - if err != nil { - dbt.Error(err) - } - if res0 != f0 { - dbt.Errorf("expected %q, got %q", f0, res0) - } - if res1 != f1 { - dbt.Errorf("expected %q, got %q", f1, res1) - } - if res6 != f6 { - dbt.Errorf("expected %q, got %q", f6, res6) - } - }) -} - -func TestNULL(t *testing.T) { - runTests(t, dsn, func(dbt *DBTest) { - nullStmt, err := dbt.db.Prepare("SELECT NULL") - if err != nil { - dbt.Fatal(err) - } - defer nullStmt.Close() - - nonNullStmt, err := dbt.db.Prepare("SELECT 1") - if err != nil { - dbt.Fatal(err) - } - defer nonNullStmt.Close() - - // NullBool - var nb sql.NullBool - // Invalid - if err = nullStmt.QueryRow().Scan(&nb); err != nil { - dbt.Fatal(err) - } - if nb.Valid { - dbt.Error("Valid NullBool which should be invalid") - } - // Valid - if err = nonNullStmt.QueryRow().Scan(&nb); err != nil { - dbt.Fatal(err) - } - if !nb.Valid { - dbt.Error("Invalid NullBool which should be valid") - } else if nb.Bool != true { - dbt.Errorf("Unexpected NullBool value: %t (should be true)", nb.Bool) - } - - // NullFloat64 - var nf sql.NullFloat64 - // Invalid - if err = nullStmt.QueryRow().Scan(&nf); err != nil { - 
dbt.Fatal(err) - } - if nf.Valid { - dbt.Error("Valid NullFloat64 which should be invalid") - } - // Valid - if err = nonNullStmt.QueryRow().Scan(&nf); err != nil { - dbt.Fatal(err) - } - if !nf.Valid { - dbt.Error("Invalid NullFloat64 which should be valid") - } else if nf.Float64 != float64(1) { - dbt.Errorf("Unexpected NullFloat64 value: %f (should be 1.0)", nf.Float64) - } - - // NullInt64 - var ni sql.NullInt64 - // Invalid - if err = nullStmt.QueryRow().Scan(&ni); err != nil { - dbt.Fatal(err) - } - if ni.Valid { - dbt.Error("Valid NullInt64 which should be invalid") - } - // Valid - if err = nonNullStmt.QueryRow().Scan(&ni); err != nil { - dbt.Fatal(err) - } - if !ni.Valid { - dbt.Error("Invalid NullInt64 which should be valid") - } else if ni.Int64 != int64(1) { - dbt.Errorf("Unexpected NullInt64 value: %d (should be 1)", ni.Int64) - } - - // NullString - var ns sql.NullString - // Invalid - if err = nullStmt.QueryRow().Scan(&ns); err != nil { - dbt.Fatal(err) - } - if ns.Valid { - dbt.Error("Valid NullString which should be invalid") - } - // Valid - if err = nonNullStmt.QueryRow().Scan(&ns); err != nil { - dbt.Fatal(err) - } - if !ns.Valid { - dbt.Error("Invalid NullString which should be valid") - } else if ns.String != `1` { - dbt.Error("Unexpected NullString value:" + ns.String + " (should be `1`)") - } - - // nil-bytes - var b []byte - // Read nil - if err = nullStmt.QueryRow().Scan(&b); err != nil { - dbt.Fatal(err) - } - if b != nil { - dbt.Error("Non-nil []byte which should be nil") - } - // Read non-nil - if err = nonNullStmt.QueryRow().Scan(&b); err != nil { - dbt.Fatal(err) - } - if b == nil { - dbt.Error("Nil []byte which should be non-nil") - } - // Insert nil - b = nil - success := false - if err = dbt.db.QueryRow("SELECT ? IS NULL", b).Scan(&success); err != nil { - dbt.Fatal(err) - } - if !success { - dbt.Error("Inserting []byte(nil) as NULL failed") - } - // Check input==output with input==nil - b = nil - if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil { - dbt.Fatal(err) - } - if b != nil { - dbt.Error("Non-nil echo from nil input") - } - // Check input==output with input!=nil - b = []byte("") - if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil { - dbt.Fatal(err) - } - if b == nil { - dbt.Error("nil echo from non-nil input") - } - - // Insert NULL - dbt.mustExec("CREATE TABLE test (dummy1 int, value int, dummy2 int)") - - dbt.mustExec("INSERT INTO test VALUES (?, ?, ?)", 1, nil, 2) - - var out interface{} - rows := dbt.mustQuery("SELECT * FROM test") - if rows.Next() { - rows.Scan(&out) - if out != nil { - dbt.Errorf("%v != nil", out) - } - } else { - dbt.Error("no data") - } - }) -} - -func TestUint64(t *testing.T) { - const ( - u0 = uint64(0) - uall = ^u0 - uhigh = uall >> 1 - utop = ^uhigh - s0 = int64(0) - sall = ^s0 - shigh = int64(uhigh) - stop = ^shigh - ) - runTests(t, dsn, func(dbt *DBTest) { - stmt, err := dbt.db.Prepare(`SELECT ?, ?, ? 
,?, ?, ?, ?, ?`) - if err != nil { - dbt.Fatal(err) - } - defer stmt.Close() - row := stmt.QueryRow( - u0, uhigh, utop, uall, - s0, shigh, stop, sall, - ) - - var ua, ub, uc, ud uint64 - var sa, sb, sc, sd int64 - - err = row.Scan(&ua, &ub, &uc, &ud, &sa, &sb, &sc, &sd) - if err != nil { - dbt.Fatal(err) - } - switch { - case ua != u0, - ub != uhigh, - uc != utop, - ud != uall, - sa != s0, - sb != shigh, - sc != stop, - sd != sall: - dbt.Fatal("Unexpected result value") - } - }) -} - -func TestLongData(t *testing.T) { - runTests(t, dsn, func(dbt *DBTest) { - var maxAllowedPacketSize int - err := dbt.db.QueryRow("select @@max_allowed_packet").Scan(&maxAllowedPacketSize) - if err != nil { - dbt.Fatal(err) - } - maxAllowedPacketSize-- - - // don't get too ambitious - if maxAllowedPacketSize > 1<<25 { - maxAllowedPacketSize = 1 << 25 - } - - dbt.mustExec("CREATE TABLE test (value LONGBLOB)") - - in := strings.Repeat(`a`, maxAllowedPacketSize+1) - var out string - var rows *sql.Rows - - // Long text data - const nonDataQueryLen = 28 // length query w/o value - inS := in[:maxAllowedPacketSize-nonDataQueryLen] - dbt.mustExec("INSERT INTO test VALUES('" + inS + "')") - rows = dbt.mustQuery("SELECT value FROM test") - if rows.Next() { - rows.Scan(&out) - if inS != out { - dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(inS), len(out)) - } - if rows.Next() { - dbt.Error("LONGBLOB: unexpected row") - } - } else { - dbt.Fatalf("LONGBLOB: no data") - } - - // Empty table - dbt.mustExec("TRUNCATE TABLE test") - - // Long binary data - dbt.mustExec("INSERT INTO test VALUES(?)", in) - rows = dbt.mustQuery("SELECT value FROM test WHERE 1=?", 1) - if rows.Next() { - rows.Scan(&out) - if in != out { - dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(in), len(out)) - } - if rows.Next() { - dbt.Error("LONGBLOB: unexpected row") - } - } else { - if err = rows.Err(); err != nil { - dbt.Fatalf("LONGBLOB: no data (err: %s)", err.Error()) - } else { - dbt.Fatal("LONGBLOB: no data (err: <nil>)") - } - } - }) -} - -func TestLoadData(t *testing.T) { - runTests(t, dsn, func(dbt *DBTest) { - verifyLoadDataResult := func() { - rows, err := dbt.db.Query("SELECT * FROM test") - if err != nil { - dbt.Fatal(err.Error()) - } - - i := 0 - values := [4]string{ - "a string", - "a string containing a \t", - "a string containing a \n", - "a string containing both \t\n", - } - - var id int - var value string - - for rows.Next() { - i++ - err = rows.Scan(&id, &value) - if err != nil { - dbt.Fatal(err.Error()) - } - if i != id { - dbt.Fatalf("%d != %d", i, id) - } - if values[i-1] != value { - dbt.Fatalf("%q != %q", values[i-1], value) - } - } - err = rows.Err() - if err != nil { - dbt.Fatal(err.Error()) - } - - if i != 4 { - dbt.Fatalf("Rows count mismatch. 
Got %d, want 4", i) - } - } - file, err := ioutil.TempFile("", "gotest") - defer os.Remove(file.Name()) - if err != nil { - dbt.Fatal(err) - } - file.WriteString("1\ta string\n2\ta string containing a \\t\n3\ta string containing a \\n\n4\ta string containing both \\t\\n\n") - file.Close() - - dbt.db.Exec("DROP TABLE IF EXISTS test") - dbt.mustExec("CREATE TABLE test (id INT NOT NULL PRIMARY KEY, value TEXT NOT NULL) CHARACTER SET utf8") - - // Local File - RegisterLocalFile(file.Name()) - dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE %q INTO TABLE test", file.Name())) - verifyLoadDataResult() - // negative test - _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'doesnotexist' INTO TABLE test") - if err == nil { - dbt.Fatal("Load non-existent file didn't fail") - } else if err.Error() != "Local File 'doesnotexist' is not registered. Use the DSN parameter 'allowAllFiles=true' to allow all files" { - dbt.Fatal(err.Error()) - } - - // Empty table - dbt.mustExec("TRUNCATE TABLE test") - - // Reader - RegisterReaderHandler("test", func() io.Reader { - file, err = os.Open(file.Name()) - if err != nil { - dbt.Fatal(err) - } - return file - }) - dbt.mustExec("LOAD DATA LOCAL INFILE 'Reader::test' INTO TABLE test") - verifyLoadDataResult() - // negative test - _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'Reader::doesnotexist' INTO TABLE test") - if err == nil { - dbt.Fatal("Load non-existent Reader didn't fail") - } else if err.Error() != "Reader 'doesnotexist' is not registered" { - dbt.Fatal(err.Error()) - } - }) -} - -func TestFoundRows(t *testing.T) { - runTests(t, dsn, func(dbt *DBTest) { - dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)") - dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)") - - res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0") - count, err := res.RowsAffected() - if err != nil { - dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) - } - if count != 2 { - dbt.Fatalf("Expected 2 affected rows, got %d", count) - } - res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1") - count, err = res.RowsAffected() - if err != nil { - dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) - } - if count != 2 { - dbt.Fatalf("Expected 2 affected rows, got %d", count) - } - }) - runTests(t, dsn+"&clientFoundRows=true", func(dbt *DBTest) { - dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)") - dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)") - - res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0") - count, err := res.RowsAffected() - if err != nil { - dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) - } - if count != 2 { - dbt.Fatalf("Expected 2 matched rows, got %d", count) - } - res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1") - count, err = res.RowsAffected() - if err != nil { - dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) - } - if count != 3 { - dbt.Fatalf("Expected 3 matched rows, got %d", count) - } - }) -} - -func TestStrict(t *testing.T) { - // ALLOW_INVALID_DATES to get rid of stricter modes - we want to test for warnings, not errors - relaxedDsn := dsn + "&sql_mode=ALLOW_INVALID_DATES" - // make sure the MySQL version is recent enough with a separate connection - // before running the test - conn, err := MySQLDriver{}.Open(relaxedDsn) - if conn != nil { - conn.Close() - } - if me, ok := err.(*MySQLError); ok && me.Number == 1231 { - // Error 1231: Variable 'sql_mode' can't be set 
to the value of 'ALLOW_INVALID_DATES' - // => skip test, MySQL server version is too old - return - } - runTests(t, relaxedDsn, func(dbt *DBTest) { - dbt.mustExec("CREATE TABLE test (a TINYINT NOT NULL, b CHAR(4))") - - var queries = [...]struct { - in string - codes []string - }{ - {"DROP TABLE IF EXISTS no_such_table", []string{"1051"}}, - {"INSERT INTO test VALUES(10,'mysql'),(NULL,'test'),(300,'Open Source')", []string{"1265", "1048", "1264", "1265"}}, - } - var err error - - var checkWarnings = func(err error, mode string, idx int) { - if err == nil { - dbt.Errorf("Expected STRICT error on query [%s] %s", mode, queries[idx].in) - } - - if warnings, ok := err.(MySQLWarnings); ok { - var codes = make([]string, len(warnings)) - for i := range warnings { - codes[i] = warnings[i].Code - } - if len(codes) != len(queries[idx].codes) { - dbt.Errorf("Unexpected STRICT error count on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes) - } - - for i := range warnings { - if codes[i] != queries[idx].codes[i] { - dbt.Errorf("Unexpected STRICT error codes on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes) - return - } - } - - } else { - dbt.Errorf("Unexpected error on query [%s] %s: %s", mode, queries[idx].in, err.Error()) - } - } - - // text protocol - for i := range queries { - _, err = dbt.db.Exec(queries[i].in) - checkWarnings(err, "text", i) - } - - var stmt *sql.Stmt - - // binary protocol - for i := range queries { - stmt, err = dbt.db.Prepare(queries[i].in) - if err != nil { - dbt.Errorf("Error on preparing query %s: %s", queries[i].in, err.Error()) - } - - _, err = stmt.Exec() - checkWarnings(err, "binary", i) - - err = stmt.Close() - if err != nil { - dbt.Errorf("Error on closing stmt for query %s: %s", queries[i].in, err.Error()) - } - } - }) -} - -func TestTLS(t *testing.T) { - tlsTest := func(dbt *DBTest) { - if err := dbt.db.Ping(); err != nil { - if err == ErrNoTLS { - dbt.Skip("Server does not support TLS") - } else { - dbt.Fatalf("Error on Ping: %s", err.Error()) - } - } - - rows := dbt.mustQuery("SHOW STATUS LIKE 'Ssl_cipher'") - - var variable, value *sql.RawBytes - for rows.Next() { - if err := rows.Scan(&variable, &value); err != nil { - dbt.Fatal(err.Error()) - } - - if value == nil { - dbt.Fatal("No Cipher") - } - } - } - - runTests(t, dsn+"&tls=skip-verify", tlsTest) - - // Verify that registering / using a custom cfg works - RegisterTLSConfig("custom-skip-verify", &tls.Config{ - InsecureSkipVerify: true, - }) - runTests(t, dsn+"&tls=custom-skip-verify", tlsTest) -} - -func TestReuseClosedConnection(t *testing.T) { - // this test does not use sql.database, it uses the driver directly - if !available { - t.Skipf("MySQL-Server not running on %s", netAddr) - } - - md := &MySQLDriver{} - conn, err := md.Open(dsn) - if err != nil { - t.Fatalf("Error connecting: %s", err.Error()) - } - stmt, err := conn.Prepare("DO 1") - if err != nil { - t.Fatalf("Error preparing statement: %s", err.Error()) - } - _, err = stmt.Exec(nil) - if err != nil { - t.Fatalf("Error executing statement: %s", err.Error()) - } - err = conn.Close() - if err != nil { - t.Fatalf("Error closing connection: %s", err.Error()) - } - - defer func() { - if err := recover(); err != nil { - t.Errorf("Panic after reusing a closed connection: %v", err) - } - }() - _, err = stmt.Exec(nil) - if err != nil && err != driver.ErrBadConn { - t.Errorf("Unexpected error '%s', expected '%s'", - err.Error(), driver.ErrBadConn.Error()) - } -} - -func TestCharset(t 
*testing.T) { - if !available { - t.Skipf("MySQL-Server not running on %s", netAddr) - } - - mustSetCharset := func(charsetParam, expected string) { - runTests(t, dsn+"&"+charsetParam, func(dbt *DBTest) { - rows := dbt.mustQuery("SELECT @@character_set_connection") - defer rows.Close() - - if !rows.Next() { - dbt.Fatalf("Error getting connection charset: %s", rows.Err()) - } - - var got string - rows.Scan(&got) - - if got != expected { - dbt.Fatalf("Expected connection charset %s but got %s", expected, got) - } - }) - } - - // non utf8 test - mustSetCharset("charset=ascii", "ascii") - - // when the first charset is invalid, use the second - mustSetCharset("charset=none,utf8", "utf8") - - // when the first charset is valid, use it - mustSetCharset("charset=ascii,utf8", "ascii") - mustSetCharset("charset=utf8,ascii", "utf8") -} - -func TestFailingCharset(t *testing.T) { - runTests(t, dsn+"&charset=none", func(dbt *DBTest) { - // run query to really establish connection... - _, err := dbt.db.Exec("SELECT 1") - if err == nil { - dbt.db.Close() - t.Fatalf("Connection must not succeed without a valid charset") - } - }) -} - -func TestCollation(t *testing.T) { - if !available { - t.Skipf("MySQL-Server not running on %s", netAddr) - } - - defaultCollation := "utf8_general_ci" - testCollations := []string{ - "", // do not set - defaultCollation, // driver default - "latin1_general_ci", - "binary", - "utf8_unicode_ci", - "cp1257_bin", - } - - for _, collation := range testCollations { - var expected, tdsn string - if collation != "" { - tdsn = dsn + "&collation=" + collation - expected = collation - } else { - tdsn = dsn - expected = defaultCollation - } - - runTests(t, tdsn, func(dbt *DBTest) { - var got string - if err := dbt.db.QueryRow("SELECT @@collation_connection").Scan(&got); err != nil { - dbt.Fatal(err) - } - - if got != expected { - dbt.Fatalf("Expected connection collation %s but got %s", expected, got) - } - }) - } -} - -func TestColumnsWithAlias(t *testing.T) { - runTests(t, dsn+"&columnsWithAlias=true", func(dbt *DBTest) { - rows := dbt.mustQuery("SELECT 1 AS A") - defer rows.Close() - cols, _ := rows.Columns() - if len(cols) != 1 { - t.Fatalf("expected 1 column, got %d", len(cols)) - } - if cols[0] != "A" { - t.Fatalf("expected column name \"A\", got \"%s\"", cols[0]) - } - rows.Close() - - rows = dbt.mustQuery("SELECT * FROM (SELECT 1 AS one) AS A") - cols, _ = rows.Columns() - if len(cols) != 1 { - t.Fatalf("expected 1 column, got %d", len(cols)) - } - if cols[0] != "A.one" { - t.Fatalf("expected column name \"A.one\", got \"%s\"", cols[0]) - } - }) -} - -func TestRawBytesResultExceedsBuffer(t *testing.T) { - runTests(t, dsn, func(dbt *DBTest) { - // defaultBufSize from buffer.go - expected := strings.Repeat("abc", defaultBufSize) - - rows := dbt.mustQuery("SELECT '" + expected + "'") - defer rows.Close() - if !rows.Next() { - dbt.Error("expected result, got none") - } - var result sql.RawBytes - rows.Scan(&result) - if expected != string(result) { - dbt.Error("result did not match expected value") - } - }) -} - -func TestTimezoneConversion(t *testing.T) { - zones := []string{"UTC", "US/Central", "US/Pacific", "Local"} - - // Regression test for timezone handling - tzTest := func(dbt *DBTest) { - - // Create table - dbt.mustExec("CREATE TABLE test (ts TIMESTAMP)") - - // Insert local time into database (should be converted) - usCentral, _ := time.LoadLocation("US/Central") - reftime := time.Date(2014, 05, 30, 18, 03, 17, 0, time.UTC).In(usCentral) - dbt.mustExec("INSERT INTO test 
VALUE (?)", reftime) - - // Retrieve time from DB - rows := dbt.mustQuery("SELECT ts FROM test") - if !rows.Next() { - dbt.Fatal("Didn't get any rows out") - } - - var dbTime time.Time - err := rows.Scan(&dbTime) - if err != nil { - dbt.Fatal("Err", err) - } - - // Check that dates match - if reftime.Unix() != dbTime.Unix() { - dbt.Errorf("Times don't match.\n") - dbt.Errorf(" Now(%v)=%v\n", usCentral, reftime) - dbt.Errorf(" Now(UTC)=%v\n", dbTime) - } - } - - for _, tz := range zones { - runTests(t, dsn+"&parseTime=true&loc="+url.QueryEscape(tz), tzTest) - } -} - -// Special cases - -func TestRowsClose(t *testing.T) { - runTests(t, dsn, func(dbt *DBTest) { - rows, err := dbt.db.Query("SELECT 1") - if err != nil { - dbt.Fatal(err) - } - - err = rows.Close() - if err != nil { - dbt.Fatal(err) - } - - if rows.Next() { - dbt.Fatal("Unexpected row after rows.Close()") - } - - err = rows.Err() - if err != nil { - dbt.Fatal(err) - } - }) -} - -// dangling statements -// http://code.google.com/p/go/issues/detail?id=3865 -func TestCloseStmtBeforeRows(t *testing.T) { - runTests(t, dsn, func(dbt *DBTest) { - stmt, err := dbt.db.Prepare("SELECT 1") - if err != nil { - dbt.Fatal(err) - } - - rows, err := stmt.Query() - if err != nil { - stmt.Close() - dbt.Fatal(err) - } - defer rows.Close() - - err = stmt.Close() - if err != nil { - dbt.Fatal(err) - } - - if !rows.Next() { - dbt.Fatal("Getting row failed") - } else { - err = rows.Err() - if err != nil { - dbt.Fatal(err) - } - - var out bool - err = rows.Scan(&out) - if err != nil { - dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) - } - if out != true { - dbt.Errorf("true != %t", out) - } - } - }) -} - -// It is valid to have multiple Rows for the same Stmt -// http://code.google.com/p/go/issues/detail?id=3734 -func TestStmtMultiRows(t *testing.T) { - runTests(t, dsn, func(dbt *DBTest) { - stmt, err := dbt.db.Prepare("SELECT 1 UNION SELECT 0") - if err != nil { - dbt.Fatal(err) - } - - rows1, err := stmt.Query() - if err != nil { - stmt.Close() - dbt.Fatal(err) - } - defer rows1.Close() - - rows2, err := stmt.Query() - if err != nil { - stmt.Close() - dbt.Fatal(err) - } - defer rows2.Close() - - var out bool - - // 1 - if !rows1.Next() { - dbt.Fatal("1st rows1.Next failed") - } else { - err = rows1.Err() - if err != nil { - dbt.Fatal(err) - } - - err = rows1.Scan(&out) - if err != nil { - dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) - } - if out != true { - dbt.Errorf("true != %t", out) - } - } - - if !rows2.Next() { - dbt.Fatal("1st rows2.Next failed") - } else { - err = rows2.Err() - if err != nil { - dbt.Fatal(err) - } - - err = rows2.Scan(&out) - if err != nil { - dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) - } - if out != true { - dbt.Errorf("true != %t", out) - } - } - - // 2 - if !rows1.Next() { - dbt.Fatal("2nd rows1.Next failed") - } else { - err = rows1.Err() - if err != nil { - dbt.Fatal(err) - } - - err = rows1.Scan(&out) - if err != nil { - dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) - } - if out != false { - dbt.Errorf("false != %t", out) - } - - if rows1.Next() { - dbt.Fatal("Unexpected row on rows1") - } - err = rows1.Close() - if err != nil { - dbt.Fatal(err) - } - } - - if !rows2.Next() { - dbt.Fatal("2nd rows2.Next failed") - } else { - err = rows2.Err() - if err != nil { - dbt.Fatal(err) - } - - err = rows2.Scan(&out) - if err != nil { - dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) - } - if out != false { - dbt.Errorf("false != %t", out) - } - - if rows2.Next() { - dbt.Fatal("Unexpected row on 
rows2") - } - err = rows2.Close() - if err != nil { - dbt.Fatal(err) - } - } - }) -} - -// Regression test for -// * more than 32 NULL parameters (issue 209) -// * more parameters than fit into the buffer (issue 201) -func TestPreparedManyCols(t *testing.T) { - const numParams = defaultBufSize - runTests(t, dsn, func(dbt *DBTest) { - query := "SELECT ?" + strings.Repeat(",?", numParams-1) - stmt, err := dbt.db.Prepare(query) - if err != nil { - dbt.Fatal(err) - } - defer stmt.Close() - // create more parameters than fit into the buffer - // which will take nil-values - params := make([]interface{}, numParams) - rows, err := stmt.Query(params...) - if err != nil { - stmt.Close() - dbt.Fatal(err) - } - defer rows.Close() - }) -} - -func TestConcurrent(t *testing.T) { - if enabled, _ := readBool(os.Getenv("MYSQL_TEST_CONCURRENT")); !enabled { - t.Skip("MYSQL_TEST_CONCURRENT env var not set") - } - - runTests(t, dsn, func(dbt *DBTest) { - var max int - err := dbt.db.QueryRow("SELECT @@max_connections").Scan(&max) - if err != nil { - dbt.Fatalf("%s", err.Error()) - } - dbt.Logf("Testing up to %d concurrent connections \r\n", max) - - var remaining, succeeded int32 = int32(max), 0 - - var wg sync.WaitGroup - wg.Add(max) - - var fatalError string - var once sync.Once - fatalf := func(s string, vals ...interface{}) { - once.Do(func() { - fatalError = fmt.Sprintf(s, vals...) - }) - } - - for i := 0; i < max; i++ { - go func(id int) { - defer wg.Done() - - tx, err := dbt.db.Begin() - atomic.AddInt32(&remaining, -1) - - if err != nil { - if err.Error() != "Error 1040: Too many connections" { - fatalf("Error on Conn %d: %s", id, err.Error()) - } - return - } - - // keep the connection busy until all connections are open - for remaining > 0 { - if _, err = tx.Exec("DO 1"); err != nil { - fatalf("Error on Conn %d: %s", id, err.Error()) - return - } - } - - if err = tx.Commit(); err != nil { - fatalf("Error on Conn %d: %s", id, err.Error()) - return - } - - // everything went fine with this connection - atomic.AddInt32(&succeeded, 1) - }(i) - } - - // wait until all conections are open - wg.Wait() - - if fatalError != "" { - dbt.Fatal(fatalError) - } - - dbt.Logf("Reached %d concurrent connections\r\n", succeeded) - }) -} - -// Tests custom dial functions -func TestCustomDial(t *testing.T) { - if !available { - t.Skipf("MySQL-Server not running on %s", netAddr) - } - - // our custom dial function which justs wraps net.Dial here - RegisterDial("mydial", func(addr string) (net.Conn, error) { - return net.Dial(prot, addr) - }) - - db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s&strict=true", user, pass, addr, dbname)) - if err != nil { - t.Fatalf("Error connecting: %s", err.Error()) - } - defer db.Close() - - if _, err = db.Exec("DO 1"); err != nil { - t.Fatalf("Connection failed: %s", err.Error()) - } -} - -func TestSqlInjection(t *testing.T) { - createTest := func(arg string) func(dbt *DBTest) { - return func(dbt *DBTest) { - dbt.mustExec("CREATE TABLE test (v INTEGER)") - dbt.mustExec("INSERT INTO test VALUES (?)", 1) - - var v int - // NULL can't be equal to anything, the idea here is to inject query so it returns row - // This test verifies that escapeQuotes and escapeBackslash are working properly - err := dbt.db.QueryRow("SELECT v FROM test WHERE NULL = ?", arg).Scan(&v) - if err == sql.ErrNoRows { - return // success, sql injection failed - } else if err == nil { - dbt.Errorf("Sql injection successful with arg: %s", arg) - } else { - dbt.Errorf("Error running query with 
arg: %s; err: %s", arg, err.Error()) - } - } - } - - dsns := []string{ - dsn, - dsn + "&sql_mode=NO_BACKSLASH_ESCAPES", - } - for _, testdsn := range dsns { - runTests(t, testdsn, createTest("1 OR 1=1")) - runTests(t, testdsn, createTest("' OR '1'='1")) - } -} - -// Test if inserted data is correctly retrieved after being escaped -func TestInsertRetrieveEscapedData(t *testing.T) { - testData := func(dbt *DBTest) { - dbt.mustExec("CREATE TABLE test (v VARCHAR(255))") - - // All sequences that are escaped by escapeQuotes and escapeBackslash - v := "foo \x00\n\r\x1a\"'\\" - dbt.mustExec("INSERT INTO test VALUES (?)", v) - - var out string - err := dbt.db.QueryRow("SELECT v FROM test").Scan(&out) - if err != nil { - dbt.Fatalf("%s", err.Error()) - } - - if out != v { - dbt.Errorf("%q != %q", out, v) - } - } - - dsns := []string{ - dsn, - dsn + "&sql_mode=NO_BACKSLASH_ESCAPES", - } - for _, testdsn := range dsns { - runTests(t, testdsn, testData) - } -} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors.go deleted file mode 100644 index 44cf30db6..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors.go +++ /dev/null @@ -1,131 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "database/sql/driver" - "errors" - "fmt" - "io" - "log" - "os" -) - -// Various errors the driver might return. Can change between driver versions. -var ( - ErrInvalidConn = errors.New("Invalid Connection") - ErrMalformPkt = errors.New("Malformed Packet") - ErrNoTLS = errors.New("TLS encryption requested but server does not support TLS") - ErrOldPassword = errors.New("This user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords") - ErrCleartextPassword = errors.New("This user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN.") - ErrUnknownPlugin = errors.New("The authentication plugin is not supported.") - ErrOldProtocol = errors.New("MySQL-Server does not support required Protocol 41+") - ErrPktSync = errors.New("Commands out of sync. You can't run this command now") - ErrPktSyncMul = errors.New("Commands out of sync. Did you run multiple statements at once?") - ErrPktTooLarge = errors.New("Packet for query is too large. You can change this value on the server by adjusting the 'max_allowed_packet' variable.") - ErrBusyBuffer = errors.New("Busy buffer") -) - -var errLog Logger = log.New(os.Stderr, "[MySQL] ", log.Ldate|log.Ltime|log.Lshortfile) - -// Logger is used to log critical error messages. -type Logger interface { - Print(v ...interface{}) -} - -// SetLogger is used to set the logger for critical errors. -// The initial logger is os.Stderr. 
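// A usage sketch for the hook documented above (any value satisfying the
// one-method Logger interface works; log.New from the standard library does):
//
//	logger := log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime)
//	if err := SetLogger(logger); err != nil {
//		// a nil logger is the only rejected input
//	}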
-func SetLogger(logger Logger) error { - if logger == nil { - return errors.New("logger is nil") - } - errLog = logger - return nil -} - -// MySQLError is an error type which represents a single MySQL error -type MySQLError struct { - Number uint16 - Message string -} - -func (me *MySQLError) Error() string { - return fmt.Sprintf("Error %d: %s", me.Number, me.Message) -} - -// MySQLWarnings is an error type which represents a group of one or more MySQL -// warnings -type MySQLWarnings []MySQLWarning - -func (mws MySQLWarnings) Error() string { - var msg string - for i, warning := range mws { - if i > 0 { - msg += "\r\n" - } - msg += fmt.Sprintf( - "%s %s: %s", - warning.Level, - warning.Code, - warning.Message, - ) - } - return msg -} - -// MySQLWarning is an error type which represents a single MySQL warning. -// Warnings are returned in groups only. See MySQLWarnings -type MySQLWarning struct { - Level string - Code string - Message string -} - -func (mc *mysqlConn) getWarnings() (err error) { - rows, err := mc.Query("SHOW WARNINGS", nil) - if err != nil { - return - } - - var warnings = MySQLWarnings{} - var values = make([]driver.Value, 3) - - for { - err = rows.Next(values) - switch err { - case nil: - warning := MySQLWarning{} - - if raw, ok := values[0].([]byte); ok { - warning.Level = string(raw) - } else { - warning.Level = fmt.Sprintf("%s", values[0]) - } - if raw, ok := values[1].([]byte); ok { - warning.Code = string(raw) - } else { - warning.Code = fmt.Sprintf("%s", values[1]) - } - if raw, ok := values[2].([]byte); ok { - warning.Message = string(raw) - } else { - warning.Message = fmt.Sprintf("%s", values[2]) - } - - warnings = append(warnings, warning) - - case io.EOF: - return warnings - - default: - rows.Close() - return - } - } -} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors_test.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors_test.go deleted file mode 100644 index 96f9126d6..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors_test.go +++ /dev/null @@ -1,42 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "bytes" - "log" - "testing" -) - -func TestErrorsSetLogger(t *testing.T) { - previous := errLog - defer func() { - errLog = previous - }() - - // set up logger - const expected = "prefix: test\n" - buffer := bytes.NewBuffer(make([]byte, 0, 64)) - logger := log.New(buffer, "prefix: ", 0) - - // print - SetLogger(logger) - errLog.Print("test") - - // check result - if actual := buffer.String(); actual != expected { - t.Errorf("expected %q, got %q", expected, actual) - } -} - -func TestErrorsStrictIgnoreNotes(t *testing.T) { - runTests(t, dsn+"&sql_notes=false", func(dbt *DBTest) { - dbt.mustExec("DROP TABLE IF EXISTS does_not_exist") - }) -} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/infile.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/infile.go deleted file mode 100644 index a2dedb3c0..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/infile.go +++ /dev/null @@ -1,164 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. 
-// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "fmt" - "io" - "os" - "strings" -) - -var ( - fileRegister map[string]bool - readerRegister map[string]func() io.Reader -) - -// RegisterLocalFile adds the given file to the file whitelist, -// so that it can be used by "LOAD DATA LOCAL INFILE <filepath>". -// Alternatively you can allow the use of all local files with -// the DSN parameter 'allowAllFiles=true' -// -// filePath := "/home/gopher/data.csv" -// mysql.RegisterLocalFile(filePath) -// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo") -// if err != nil { -// ... -// -func RegisterLocalFile(filePath string) { - // lazy map init - if fileRegister == nil { - fileRegister = make(map[string]bool) - } - - fileRegister[strings.Trim(filePath, `"`)] = true -} - -// DeregisterLocalFile removes the given filepath from the whitelist. -func DeregisterLocalFile(filePath string) { - delete(fileRegister, strings.Trim(filePath, `"`)) -} - -// RegisterReaderHandler registers a handler function which is used -// to receive an io.Reader. -// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::<name>". -// If the handler returns an io.ReadCloser, Close() is called when the -// request is finished. -// -// mysql.RegisterReaderHandler("data", func() io.Reader { -// var csvReader io.Reader // Some Reader that returns CSV data -// ... // Open Reader here -// return csvReader -// }) -// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo") -// if err != nil { -// ... -// -func RegisterReaderHandler(name string, handler func() io.Reader) { - // lazy map init - if readerRegister == nil { - readerRegister = make(map[string]func() io.Reader) - } - - readerRegister[name] = handler -} - -// DeregisterReaderHandler removes the ReaderHandler function with -// the given name from the registry. -func DeregisterReaderHandler(name string) { - delete(readerRegister, name) -} - -func deferredClose(err *error, closer io.Closer) { - closeErr := closer.Close() - if *err == nil { - *err = closeErr - } -} - -func (mc *mysqlConn) handleInFileRequest(name string) (err error) { - var rdr io.Reader - var data []byte - - if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader - // The server might return an absolute path. See issue #355. - name = name[idx+8:] - - if handler, inMap := readerRegister[name]; inMap { - rdr = handler() - if rdr != nil { - data = make([]byte, 4+mc.maxWriteSize) - - if cl, ok := rdr.(io.Closer); ok { - defer deferredClose(&err, cl) - } - } else { - err = fmt.Errorf("Reader '%s' is <nil>", name) - } - } else { - err = fmt.Errorf("Reader '%s' is not registered", name) - } - } else { // File - name = strings.Trim(name, `"`) - if mc.cfg.allowAllFiles || fileRegister[name] { - var file *os.File - var fi os.FileInfo - - if file, err = os.Open(name); err == nil { - defer deferredClose(&err, file) - - // get file size - if fi, err = file.Stat(); err == nil { - rdr = file - if fileSize := int(fi.Size()); fileSize <= mc.maxWriteSize { - data = make([]byte, 4+fileSize) - } else if fileSize <= mc.maxPacketAllowed { - data = make([]byte, 4+mc.maxWriteSize) - } else { - err = fmt.Errorf("Local File '%s' too large: Size: %d, Max: %d", name, fileSize, mc.maxPacketAllowed) - } - } - } - } else { - err = fmt.Errorf("Local File '%s' is not registered. 
Use the DSN parameter 'allowAllFiles=true' to allow all files", name) - } - } - - // send content packets - if err == nil { - var n int - for err == nil { - n, err = rdr.Read(data[4:]) - if n > 0 { - if ioErr := mc.writePacket(data[:4+n]); ioErr != nil { - return ioErr - } - } - } - if err == io.EOF { - err = nil - } - } - - // send empty packet (termination) - if data == nil { - data = make([]byte, 4) - } - if ioErr := mc.writePacket(data[:4]); ioErr != nil { - return ioErr - } - - // read OK packet - if err == nil { - return mc.readResultOK() - } else { - mc.readPacket() - } - return err -} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/packets.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/packets.go deleted file mode 100644 index 14395bf9a..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/packets.go +++ /dev/null @@ -1,1179 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "bytes" - "crypto/tls" - "database/sql/driver" - "encoding/binary" - "fmt" - "io" - "math" - "time" -) - -// Packets documentation: -// http://dev.mysql.com/doc/internals/en/client-server-protocol.html - -// Read packet to buffer 'data' -func (mc *mysqlConn) readPacket() ([]byte, error) { - var payload []byte - for { - // Read packet header - data, err := mc.buf.readNext(4) - if err != nil { - errLog.Print(err) - mc.Close() - return nil, driver.ErrBadConn - } - - // Packet Length [24 bit] - pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16) - - if pktLen < 1 { - errLog.Print(ErrMalformPkt) - mc.Close() - return nil, driver.ErrBadConn - } - - // Check Packet Sync [8 bit] - if data[3] != mc.sequence { - if data[3] > mc.sequence { - return nil, ErrPktSyncMul - } else { - return nil, ErrPktSync - } - } - mc.sequence++ - - // Read packet body [pktLen bytes] - data, err = mc.buf.readNext(pktLen) - if err != nil { - errLog.Print(err) - mc.Close() - return nil, driver.ErrBadConn - } - - isLastPacket := (pktLen < maxPacketSize) - - // Zero allocations for non-splitting packets - if isLastPacket && payload == nil { - return data, nil - } - - payload = append(payload, data...) 
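// For readers following this loop: each packet starts with a 4-byte header,
// 3 bytes of little-endian payload length plus 1 sequence byte, and a payload
// of exactly maxPacketSize forces at least one follow-up packet. A sketch of
// the header arithmetic used above, with illustrative bytes:
//
//	hdr := []byte{0x2c, 0x01, 0x00, 0x03}
//	pktLen := int(uint32(hdr[0]) | uint32(hdr[1])<<8 | uint32(hdr[2])<<16) // 300
//	seq := hdr[3]                                                          // 3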
- - if isLastPacket { - return payload, nil - } - } -} - -// Write packet buffer 'data' -func (mc *mysqlConn) writePacket(data []byte) error { - pktLen := len(data) - 4 - - if pktLen > mc.maxPacketAllowed { - return ErrPktTooLarge - } - - for { - var size int - if pktLen >= maxPacketSize { - data[0] = 0xff - data[1] = 0xff - data[2] = 0xff - size = maxPacketSize - } else { - data[0] = byte(pktLen) - data[1] = byte(pktLen >> 8) - data[2] = byte(pktLen >> 16) - size = pktLen - } - data[3] = mc.sequence - - // Write packet - n, err := mc.netConn.Write(data[:4+size]) - if err == nil && n == 4+size { - mc.sequence++ - if size != maxPacketSize { - return nil - } - pktLen -= size - data = data[size:] - continue - } - - // Handle error - if err == nil { // n != len(data) - errLog.Print(ErrMalformPkt) - } else { - errLog.Print(err) - } - return driver.ErrBadConn - } -} - -/****************************************************************************** -* Initialisation Process * -******************************************************************************/ - -// Handshake Initialization Packet -// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake -func (mc *mysqlConn) readInitPacket() ([]byte, error) { - data, err := mc.readPacket() - if err != nil { - return nil, err - } - - if data[0] == iERR { - return nil, mc.handleErrorPacket(data) - } - - // protocol version [1 byte] - if data[0] < minProtocolVersion { - return nil, fmt.Errorf( - "Unsupported MySQL Protocol Version %d. Protocol Version %d or higher is required", - data[0], - minProtocolVersion, - ) - } - - // server version [null terminated string] - // connection id [4 bytes] - pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4 - - // first part of the password cipher [8 bytes] - cipher := data[pos : pos+8] - - // (filler) always 0x00 [1 byte] - pos += 8 + 1 - - // capability flags (lower 2 bytes) [2 bytes] - mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) - if mc.flags&clientProtocol41 == 0 { - return nil, ErrOldProtocol - } - if mc.flags&clientSSL == 0 && mc.cfg.tls != nil { - return nil, ErrNoTLS - } - pos += 2 - - if len(data) > pos { - // character set [1 byte] - // status flags [2 bytes] - // capability flags (upper 2 bytes) [2 bytes] - // length of auth-plugin-data [1 byte] - // reserved (all [00]) [10 bytes] - pos += 1 + 2 + 2 + 1 + 10 - - // second part of the password cipher [minimum 13 bytes], - // where len=MAX(13, length of auth-plugin-data - 8) - // - // The web documentation is ambiguous about the length. However, - // according to mysql-5.7/sql/auth/sql_authentication.cc line 538, - // the 13th byte is "\0 byte, terminating the second part of - // a scramble". So the second part of the password cipher is - // a NULL terminated string that's at least 13 bytes with the - // last byte being NULL. - // - // The official Python library uses the fixed length 12 - // which seems to work but technically could have a hidden bug. - cipher = append(cipher, data[pos:pos+12]...) 
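// In other words: the full scramble handed to the auth phase is the 8 bytes
// read earlier plus these 12 bytes, detached from the read buffer via the
// fixed-size copy just below (sketch):
//
//	var b [20]byte
//	copy(b[:], cipher) // len(cipher) == 8+12 at this point
//	return b[:], nil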
- - // TODO: Verify string termination - // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2) - // \NUL otherwise - // - //if data[len(data)-1] == 0 { - // return - //} - //return ErrMalformPkt - - // make a memory safe copy of the cipher slice - var b [20]byte - copy(b[:], cipher) - return b[:], nil - } - - // make a memory safe copy of the cipher slice - var b [8]byte - copy(b[:], cipher) - return b[:], nil -} - -// Client Authentication Packet -// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse -func (mc *mysqlConn) writeAuthPacket(cipher []byte) error { - // Adjust client flags based on server support - clientFlags := clientProtocol41 | - clientSecureConn | - clientLongPassword | - clientTransactions | - clientLocalFiles | - clientPluginAuth | - mc.flags&clientLongFlag - - if mc.cfg.clientFoundRows { - clientFlags |= clientFoundRows - } - - // To enable TLS / SSL - if mc.cfg.tls != nil { - clientFlags |= clientSSL - } - - // User Password - scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.passwd)) - - pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.user) + 1 + 1 + len(scrambleBuff) + 21 + 1 - - // To specify a db name - if n := len(mc.cfg.dbname); n > 0 { - clientFlags |= clientConnectWithDB - pktLen += n + 1 - } - - // Calculate packet length and get buffer with that size - data := mc.buf.takeSmallBuffer(pktLen + 4) - if data == nil { - // can not take the buffer. Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return driver.ErrBadConn - } - - // ClientFlags [32 bit] - data[4] = byte(clientFlags) - data[5] = byte(clientFlags >> 8) - data[6] = byte(clientFlags >> 16) - data[7] = byte(clientFlags >> 24) - - // MaxPacketSize [32 bit] (none) - data[8] = 0x00 - data[9] = 0x00 - data[10] = 0x00 - data[11] = 0x00 - - // Charset [1 byte] - data[12] = mc.cfg.collation - - // SSL Connection Request Packet - // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest - if mc.cfg.tls != nil { - // Send TLS / SSL request packet - if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil { - return err - } - - // Switch to TLS - tlsConn := tls.Client(mc.netConn, mc.cfg.tls) - if err := tlsConn.Handshake(); err != nil { - return err - } - mc.netConn = tlsConn - mc.buf.rd = tlsConn - } - - // Filler [23 bytes] (all 0x00) - pos := 13 + 23 - - // User [null terminated string] - if len(mc.cfg.user) > 0 { - pos += copy(data[pos:], mc.cfg.user) - } - data[pos] = 0x00 - pos++ - - // ScrambleBuffer [length encoded integer] - data[pos] = byte(len(scrambleBuff)) - pos += 1 + copy(data[pos+1:], scrambleBuff) - - // Databasename [null terminated string] - if len(mc.cfg.dbname) > 0 { - pos += copy(data[pos:], mc.cfg.dbname) - data[pos] = 0x00 - pos++ - } - - // Assume native client during response - pos += copy(data[pos:], "mysql_native_password") - data[pos] = 0x00 - - // Send Auth packet - return mc.writePacket(data) -} - -// Client old authentication packet -// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse -func (mc *mysqlConn) writeOldAuthPacket(cipher []byte) error { - // User password - scrambleBuff := scrambleOldPassword(cipher, []byte(mc.cfg.passwd)) - - // Calculate the packet length and add a tailing 0 - pktLen := len(scrambleBuff) + 1 - data := mc.buf.takeSmallBuffer(4 + pktLen) - if data == nil { - // can not take the buffer. 
Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return driver.ErrBadConn - } - - // Add the scrambled password [null terminated string] - copy(data[4:], scrambleBuff) - data[4+pktLen-1] = 0x00 - - return mc.writePacket(data) -} - -// Client clear text authentication packet -// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse -func (mc *mysqlConn) writeClearAuthPacket() error { - // Calculate the packet length and add a tailing 0 - pktLen := len(mc.cfg.passwd) + 1 - data := mc.buf.takeSmallBuffer(4 + pktLen) - if data == nil { - // can not take the buffer. Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return driver.ErrBadConn - } - - // Add the clear password [null terminated string] - copy(data[4:], mc.cfg.passwd) - data[4+pktLen-1] = 0x00 - - return mc.writePacket(data) -} - -/****************************************************************************** -* Command Packets * -******************************************************************************/ - -func (mc *mysqlConn) writeCommandPacket(command byte) error { - // Reset Packet Sequence - mc.sequence = 0 - - data := mc.buf.takeSmallBuffer(4 + 1) - if data == nil { - // can not take the buffer. Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return driver.ErrBadConn - } - - // Add command byte - data[4] = command - - // Send CMD packet - return mc.writePacket(data) -} - -func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error { - // Reset Packet Sequence - mc.sequence = 0 - - pktLen := 1 + len(arg) - data := mc.buf.takeBuffer(pktLen + 4) - if data == nil { - // can not take the buffer. Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return driver.ErrBadConn - } - - // Add command byte - data[4] = command - - // Add arg - copy(data[5:], arg) - - // Send CMD packet - return mc.writePacket(data) -} - -func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error { - // Reset Packet Sequence - mc.sequence = 0 - - data := mc.buf.takeSmallBuffer(4 + 1 + 4) - if data == nil { - // can not take the buffer. 
Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return driver.ErrBadConn - } - - // Add command byte - data[4] = command - - // Add arg [32 bit] - data[5] = byte(arg) - data[6] = byte(arg >> 8) - data[7] = byte(arg >> 16) - data[8] = byte(arg >> 24) - - // Send CMD packet - return mc.writePacket(data) -} - -/****************************************************************************** -* Result Packets * -******************************************************************************/ - -// Returns error if Packet is not an 'Result OK'-Packet -func (mc *mysqlConn) readResultOK() error { - data, err := mc.readPacket() - if err == nil { - // packet indicator - switch data[0] { - - case iOK: - return mc.handleOkPacket(data) - - case iEOF: - if len(data) > 1 { - plugin := string(data[1:bytes.IndexByte(data, 0x00)]) - if plugin == "mysql_old_password" { - // using old_passwords - return ErrOldPassword - } else if plugin == "mysql_clear_password" { - // using clear text password - return ErrCleartextPassword - } else { - return ErrUnknownPlugin - } - } else { - return ErrOldPassword - } - - default: // Error otherwise - return mc.handleErrorPacket(data) - } - } - return err -} - -// Result Set Header Packet -// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset -func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) { - data, err := mc.readPacket() - if err == nil { - switch data[0] { - - case iOK: - return 0, mc.handleOkPacket(data) - - case iERR: - return 0, mc.handleErrorPacket(data) - - case iLocalInFile: - return 0, mc.handleInFileRequest(string(data[1:])) - } - - // column count - num, _, n := readLengthEncodedInteger(data) - if n-len(data) == 0 { - return int(num), nil - } - - return 0, ErrMalformPkt - } - return 0, err -} - -// Error Packet -// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet -func (mc *mysqlConn) handleErrorPacket(data []byte) error { - if data[0] != iERR { - return ErrMalformPkt - } - - // 0xff [1 byte] - - // Error Number [16 bit uint] - errno := binary.LittleEndian.Uint16(data[1:3]) - - pos := 3 - - // SQL State [optional: # + 5bytes string] - if data[3] == 0x23 { - //sqlstate := string(data[4 : 4+5]) - pos = 9 - } - - // Error Message [string] - return &MySQLError{ - Number: errno, - Message: string(data[pos:]), - } -} - -// Ok Packet -// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet -func (mc *mysqlConn) handleOkPacket(data []byte) error { - var n, m int - - // 0x00 [1 byte] - - // Affected rows [Length Coded Binary] - mc.affectedRows, _, n = readLengthEncodedInteger(data[1:]) - - // Insert id [Length Coded Binary] - mc.insertId, _, m = readLengthEncodedInteger(data[1+n:]) - - // server_status [2 bytes] - mc.status = statusFlag(data[1+n+m]) | statusFlag(data[1+n+m+1])<<8 - - // warning count [2 bytes] - if !mc.strict { - return nil - } else { - pos := 1 + n + m + 2 - if binary.LittleEndian.Uint16(data[pos:pos+2]) > 0 { - return mc.getWarnings() - } - return nil - } -} - -// Read Packets as Field Packets until EOF-Packet or an Error appears -// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41 -func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) { - columns := make([]mysqlField, count) - - for i := 0; ; i++ { - data, err := mc.readPacket() - if err != nil { - return nil, err - } - - // EOF Packet - if data[0] == iEOF && (len(data) == 5 || len(data) == 1) 
{ - if i == count { - return columns, nil - } - return nil, fmt.Errorf("ColumnsCount mismatch n:%d len:%d", count, len(columns)) - } - - // Catalog - pos, err := skipLengthEncodedString(data) - if err != nil { - return nil, err - } - - // Database [len coded string] - n, err := skipLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - pos += n - - // Table [len coded string] - if mc.cfg.columnsWithAlias { - tableName, _, n, err := readLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - pos += n - columns[i].tableName = string(tableName) - } else { - n, err = skipLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - pos += n - } - - // Original table [len coded string] - n, err = skipLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - pos += n - - // Name [len coded string] - name, _, n, err := readLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - columns[i].name = string(name) - pos += n - - // Original name [len coded string] - n, err = skipLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - - // Filler [uint8] - // Charset [charset, collation uint8] - // Length [uint32] - pos += n + 1 + 2 + 4 - - // Field type [uint8] - columns[i].fieldType = data[pos] - pos++ - - // Flags [uint16] - columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) - pos += 2 - - // Decimals [uint8] - columns[i].decimals = data[pos] - //pos++ - - // Default value [len coded binary] - //if pos < len(data) { - // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:]) - //} - } -} - -// Read Packets as Field Packets until EOF-Packet or an Error appears -// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow -func (rows *textRows) readRow(dest []driver.Value) error { - mc := rows.mc - - data, err := mc.readPacket() - if err != nil { - return err - } - - // EOF Packet - if data[0] == iEOF && len(data) == 5 { - rows.mc = nil - return io.EOF - } - if data[0] == iERR { - rows.mc = nil - return mc.handleErrorPacket(data) - } - - // RowSet Packet - var n int - var isNull bool - pos := 0 - - for i := range dest { - // Read bytes and convert to string - dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) - pos += n - if err == nil { - if !isNull { - if !mc.parseTime { - continue - } else { - switch rows.columns[i].fieldType { - case fieldTypeTimestamp, fieldTypeDateTime, - fieldTypeDate, fieldTypeNewDate: - dest[i], err = parseDateTime( - string(dest[i].([]byte)), - mc.cfg.loc, - ) - if err == nil { - continue - } - default: - continue - } - } - - } else { - dest[i] = nil - continue - } - } - return err // err != nil - } - - return nil -} - -// Reads Packets until EOF-Packet or an Error appears. 
Returns count of Packets read -func (mc *mysqlConn) readUntilEOF() error { - for { - data, err := mc.readPacket() - - // No Err and no EOF Packet - if err == nil && data[0] != iEOF { - continue - } - return err // Err or EOF - } -} - -/****************************************************************************** -* Prepared Statements * -******************************************************************************/ - -// Prepare Result Packets -// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html -func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) { - data, err := stmt.mc.readPacket() - if err == nil { - // packet indicator [1 byte] - if data[0] != iOK { - return 0, stmt.mc.handleErrorPacket(data) - } - - // statement id [4 bytes] - stmt.id = binary.LittleEndian.Uint32(data[1:5]) - - // Column count [16 bit uint] - columnCount := binary.LittleEndian.Uint16(data[5:7]) - - // Param count [16 bit uint] - stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9])) - - // Reserved [8 bit] - - // Warning count [16 bit uint] - if !stmt.mc.strict { - return columnCount, nil - } else { - // Check for warnings count > 0, only available in MySQL > 4.1 - if len(data) >= 12 && binary.LittleEndian.Uint16(data[10:12]) > 0 { - return columnCount, stmt.mc.getWarnings() - } - return columnCount, nil - } - } - return 0, err -} - -// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html -func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error { - maxLen := stmt.mc.maxPacketAllowed - 1 - pktLen := maxLen - - // After the header (bytes 0-3) follows before the data: - // 1 byte command - // 4 bytes stmtID - // 2 bytes paramID - const dataOffset = 1 + 4 + 2 - - // Can not use the write buffer since - // a) the buffer is too small - // b) it is in use - data := make([]byte, 4+1+4+2+len(arg)) - - copy(data[4+dataOffset:], arg) - - for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset { - if dataOffset+argLen < maxLen { - pktLen = dataOffset + argLen - } - - stmt.mc.sequence = 0 - // Add command byte [1 byte] - data[4] = comStmtSendLongData - - // Add stmtID [32 bit] - data[5] = byte(stmt.id) - data[6] = byte(stmt.id >> 8) - data[7] = byte(stmt.id >> 16) - data[8] = byte(stmt.id >> 24) - - // Add paramID [16 bit] - data[9] = byte(paramID) - data[10] = byte(paramID >> 8) - - // Send CMD packet - err := stmt.mc.writePacket(data[:4+pktLen]) - if err == nil { - data = data[pktLen-dataOffset:] - continue - } - return err - - } - - // Reset Packet Sequence - stmt.mc.sequence = 0 - return nil -} - -// Execute Prepared Statement -// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html -func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { - if len(args) != stmt.paramCount { - return fmt.Errorf( - "Arguments count mismatch (Got: %d Has: %d)", - len(args), - stmt.paramCount, - ) - } - - const minPktLen = 4 + 1 + 4 + 1 + 4 - mc := stmt.mc - - // Reset packet-sequence - mc.sequence = 0 - - var data []byte - - if len(args) == 0 { - data = mc.buf.takeBuffer(minPktLen) - } else { - data = mc.buf.takeCompleteBuffer() - } - if data == nil { - // can not take the buffer. 
Something must be wrong with the connection - errLog.Print(ErrBusyBuffer) - return driver.ErrBadConn - } - - // command [1 byte] - data[4] = comStmtExecute - - // statement_id [4 bytes] - data[5] = byte(stmt.id) - data[6] = byte(stmt.id >> 8) - data[7] = byte(stmt.id >> 16) - data[8] = byte(stmt.id >> 24) - - // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte] - data[9] = 0x00 - - // iteration_count (uint32(1)) [4 bytes] - data[10] = 0x01 - data[11] = 0x00 - data[12] = 0x00 - data[13] = 0x00 - - if len(args) > 0 { - pos := minPktLen - - var nullMask []byte - if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= len(data) { - // buffer has to be extended but we don't know by how much so - // we depend on append after all data with known sizes fit. - // We stop at that because we deal with a lot of columns here - // which makes the required allocation size hard to guess. - tmp := make([]byte, pos+maskLen+typesLen) - copy(tmp[:pos], data[:pos]) - data = tmp - nullMask = data[pos : pos+maskLen] - pos += maskLen - } else { - nullMask = data[pos : pos+maskLen] - for i := 0; i < maskLen; i++ { - nullMask[i] = 0 - } - pos += maskLen - } - - // newParameterBoundFlag 1 [1 byte] - data[pos] = 0x01 - pos++ - - // type of each parameter [len(args)*2 bytes] - paramTypes := data[pos:] - pos += len(args) * 2 - - // value of each parameter [n bytes] - paramValues := data[pos:pos] - valuesCap := cap(paramValues) - - for i, arg := range args { - // build NULL-bitmap - if arg == nil { - nullMask[i/8] |= 1 << (uint(i) & 7) - paramTypes[i+i] = fieldTypeNULL - paramTypes[i+i+1] = 0x00 - continue - } - - // cache types and values - switch v := arg.(type) { - case int64: - paramTypes[i+i] = fieldTypeLongLong - paramTypes[i+i+1] = 0x00 - - if cap(paramValues)-len(paramValues)-8 >= 0 { - paramValues = paramValues[:len(paramValues)+8] - binary.LittleEndian.PutUint64( - paramValues[len(paramValues)-8:], - uint64(v), - ) - } else { - paramValues = append(paramValues, - uint64ToBytes(uint64(v))..., - ) - } - - case float64: - paramTypes[i+i] = fieldTypeDouble - paramTypes[i+i+1] = 0x00 - - if cap(paramValues)-len(paramValues)-8 >= 0 { - paramValues = paramValues[:len(paramValues)+8] - binary.LittleEndian.PutUint64( - paramValues[len(paramValues)-8:], - math.Float64bits(v), - ) - } else { - paramValues = append(paramValues, - uint64ToBytes(math.Float64bits(v))..., - ) - } - - case bool: - paramTypes[i+i] = fieldTypeTiny - paramTypes[i+i+1] = 0x00 - - if v { - paramValues = append(paramValues, 0x01) - } else { - paramValues = append(paramValues, 0x00) - } - - case []byte: - // Common case (non-nil value) first - if v != nil { - paramTypes[i+i] = fieldTypeString - paramTypes[i+i+1] = 0x00 - - if len(v) < mc.maxPacketAllowed-pos-len(paramValues)-(len(args)-(i+1))*64 { - paramValues = appendLengthEncodedInteger(paramValues, - uint64(len(v)), - ) - paramValues = append(paramValues, v...) - } else { - if err := stmt.writeCommandLongData(i, v); err != nil { - return err - } - } - continue - } - - // Handle []byte(nil) as a NULL value - nullMask[i/8] |= 1 << (uint(i) & 7) - paramTypes[i+i] = fieldTypeNULL - paramTypes[i+i+1] = 0x00 - - case string: - paramTypes[i+i] = fieldTypeString - paramTypes[i+i+1] = 0x00 - - if len(v) < mc.maxPacketAllowed-pos-len(paramValues)-(len(args)-(i+1))*64 { - paramValues = appendLengthEncodedInteger(paramValues, - uint64(len(v)), - ) - paramValues = append(paramValues, v...) 
- } else { - if err := stmt.writeCommandLongData(i, []byte(v)); err != nil { - return err - } - } - - case time.Time: - paramTypes[i+i] = fieldTypeString - paramTypes[i+i+1] = 0x00 - - var val []byte - if v.IsZero() { - val = []byte("0000-00-00") - } else { - val = []byte(v.In(mc.cfg.loc).Format(timeFormat)) - } - - paramValues = appendLengthEncodedInteger(paramValues, - uint64(len(val)), - ) - paramValues = append(paramValues, val...) - - default: - return fmt.Errorf("Can't convert type: %T", arg) - } - } - - // Check if param values exceeded the available buffer - // In that case we must build the data packet with the new values buffer - if valuesCap != cap(paramValues) { - data = append(data[:pos], paramValues...) - mc.buf.buf = data - } - - pos += len(paramValues) - data = data[:pos] - } - - return mc.writePacket(data) -} - -// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html -func (rows *binaryRows) readRow(dest []driver.Value) error { - data, err := rows.mc.readPacket() - if err != nil { - return err - } - - // packet indicator [1 byte] - if data[0] != iOK { - rows.mc = nil - // EOF Packet - if data[0] == iEOF && len(data) == 5 { - return io.EOF - } - - // Error otherwise - return rows.mc.handleErrorPacket(data) - } - - // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes] - pos := 1 + (len(dest)+7+2)>>3 - nullMask := data[1:pos] - - for i := range dest { - // Field is NULL - // (byte >> bit-pos) % 2 == 1 - if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 { - dest[i] = nil - continue - } - - // Convert to byte-coded string - switch rows.columns[i].fieldType { - case fieldTypeNULL: - dest[i] = nil - continue - - // Numeric Types - case fieldTypeTiny: - if rows.columns[i].flags&flagUnsigned != 0 { - dest[i] = int64(data[pos]) - } else { - dest[i] = int64(int8(data[pos])) - } - pos++ - continue - - case fieldTypeShort, fieldTypeYear: - if rows.columns[i].flags&flagUnsigned != 0 { - dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2])) - } else { - dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2]))) - } - pos += 2 - continue - - case fieldTypeInt24, fieldTypeLong: - if rows.columns[i].flags&flagUnsigned != 0 { - dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4])) - } else { - dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4]))) - } - pos += 4 - continue - - case fieldTypeLongLong: - if rows.columns[i].flags&flagUnsigned != 0 { - val := binary.LittleEndian.Uint64(data[pos : pos+8]) - if val > math.MaxInt64 { - dest[i] = uint64ToString(val) - } else { - dest[i] = int64(val) - } - } else { - dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8])) - } - pos += 8 - continue - - case fieldTypeFloat: - dest[i] = float64(math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))) - pos += 4 - continue - - case fieldTypeDouble: - dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8])) - pos += 8 - continue - - // Length coded Binary Strings - case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, - fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, - fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, - fieldTypeVarString, fieldTypeString, fieldTypeGeometry: - var isNull bool - var n int - dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) - pos += n - if err == nil { - if !isNull { - continue - } else { - dest[i] = nil - continue - } - } - return err - - case - fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD - fieldTypeTime, // Time 
[-][H]HH:MM:SS[.fractal] - fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal] - - num, isNull, n := readLengthEncodedInteger(data[pos:]) - pos += n - - switch { - case isNull: - dest[i] = nil - continue - case rows.columns[i].fieldType == fieldTypeTime: - // database/sql does not support an equivalent to TIME, return a string - var dstlen uint8 - switch decimals := rows.columns[i].decimals; decimals { - case 0x00, 0x1f: - dstlen = 8 - case 1, 2, 3, 4, 5, 6: - dstlen = 8 + 1 + decimals - default: - return fmt.Errorf( - "MySQL protocol error, illegal decimals value %d", - rows.columns[i].decimals, - ) - } - dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, true) - case rows.mc.parseTime: - dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.loc) - default: - var dstlen uint8 - if rows.columns[i].fieldType == fieldTypeDate { - dstlen = 10 - } else { - switch decimals := rows.columns[i].decimals; decimals { - case 0x00, 0x1f: - dstlen = 19 - case 1, 2, 3, 4, 5, 6: - dstlen = 19 + 1 + decimals - default: - return fmt.Errorf( - "MySQL protocol error, illegal decimals value %d", - rows.columns[i].decimals, - ) - } - } - dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, false) - } - - if err == nil { - pos += int(num) - continue - } else { - return err - } - - // Please report if this happens! - default: - return fmt.Errorf("Unknown FieldType %d", rows.columns[i].fieldType) - } - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/result.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/result.go deleted file mode 100644 index c6438d034..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/result.go +++ /dev/null @@ -1,22 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -type mysqlResult struct { - affectedRows int64 - insertId int64 -} - -func (res *mysqlResult) LastInsertId() (int64, error) { - return res.insertId, nil -} - -func (res *mysqlResult) RowsAffected() (int64, error) { - return res.affectedRows, nil -} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/rows.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/rows.go deleted file mode 100644 index ba606e146..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/rows.go +++ /dev/null @@ -1,106 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. 
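The result.go hunk above is the entire glue between the OK packet and database/sql: handleOkPacket stores the length-encoded counters on the connection, and mysqlResult merely hands them back through the standard sql.Result interface. A minimal usage sketch (the DSN, the items table and its columns are hypothetical, and a reachable server is assumed):

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/test")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The driver fills a mysqlResult from the OK packet's
	// affected-rows and insert-id counters.
	res, err := db.Exec("INSERT INTO items (name) VALUES (?)", "widget")
	if err != nil {
		log.Fatal(err)
	}
	id, _ := res.LastInsertId() // mysqlResult.insertId
	n, _ := res.RowsAffected()  // mysqlResult.affectedRows
	fmt.Println(id, n)
}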
- -package mysql - -import ( - "database/sql/driver" - "io" -) - -type mysqlField struct { - tableName string - name string - flags fieldFlag - fieldType byte - decimals byte -} - -type mysqlRows struct { - mc *mysqlConn - columns []mysqlField -} - -type binaryRows struct { - mysqlRows -} - -type textRows struct { - mysqlRows -} - -type emptyRows struct{} - -func (rows *mysqlRows) Columns() []string { - columns := make([]string, len(rows.columns)) - if rows.mc.cfg.columnsWithAlias { - for i := range columns { - if tableName := rows.columns[i].tableName; len(tableName) > 0 { - columns[i] = tableName + "." + rows.columns[i].name - } else { - columns[i] = rows.columns[i].name - } - } - } else { - for i := range columns { - columns[i] = rows.columns[i].name - } - } - return columns -} - -func (rows *mysqlRows) Close() error { - mc := rows.mc - if mc == nil { - return nil - } - if mc.netConn == nil { - return ErrInvalidConn - } - - // Remove unread packets from stream - err := mc.readUntilEOF() - rows.mc = nil - return err -} - -func (rows *binaryRows) Next(dest []driver.Value) error { - if mc := rows.mc; mc != nil { - if mc.netConn == nil { - return ErrInvalidConn - } - - // Fetch next row from stream - return rows.readRow(dest) - } - return io.EOF -} - -func (rows *textRows) Next(dest []driver.Value) error { - if mc := rows.mc; mc != nil { - if mc.netConn == nil { - return ErrInvalidConn - } - - // Fetch next row from stream - return rows.readRow(dest) - } - return io.EOF -} - -func (rows emptyRows) Columns() []string { - return nil -} - -func (rows emptyRows) Close() error { - return nil -} - -func (rows emptyRows) Next(dest []driver.Value) error { - return io.EOF -} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/statement.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/statement.go deleted file mode 100644 index 6e869b340..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/statement.go +++ /dev/null @@ -1,150 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. 
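The rows.go hunk above is where the columnsWithAlias DSN flag becomes visible to callers: Columns prefixes each name with its table alias, which disambiguates joined columns that would otherwise collide. A rough sketch, assuming hypothetical tables foo and bar:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// columnsWithAlias=true makes mysqlRows.Columns return "alias.column"
	// names instead of bare column names.
	db, err := sql.Open("mysql", "user:password@/test?columnsWithAlias=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rows, err := db.Query("SELECT a.id, b.id FROM foo AS a JOIN bar AS b ON a.id = b.id")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close() // Close drains unread packets via readUntilEOF

	cols, _ := rows.Columns()
	fmt.Println(cols) // [a.id b.id] rather than the ambiguous [id id]
}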
- -package mysql - -import ( - "database/sql/driver" - "fmt" - "reflect" - "strconv" -) - -type mysqlStmt struct { - mc *mysqlConn - id uint32 - paramCount int - columns []mysqlField // cached from the first query -} - -func (stmt *mysqlStmt) Close() error { - if stmt.mc == nil || stmt.mc.netConn == nil { - errLog.Print(ErrInvalidConn) - return driver.ErrBadConn - } - - err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id) - stmt.mc = nil - return err -} - -func (stmt *mysqlStmt) NumInput() int { - return stmt.paramCount -} - -func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter { - return converter{} -} - -func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) { - if stmt.mc.netConn == nil { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - // Send command - err := stmt.writeExecutePacket(args) - if err != nil { - return nil, err - } - - mc := stmt.mc - - mc.affectedRows = 0 - mc.insertId = 0 - - // Read Result - resLen, err := mc.readResultSetHeaderPacket() - if err == nil { - if resLen > 0 { - // Columns - err = mc.readUntilEOF() - if err != nil { - return nil, err - } - - // Rows - err = mc.readUntilEOF() - } - if err == nil { - return &mysqlResult{ - affectedRows: int64(mc.affectedRows), - insertId: int64(mc.insertId), - }, nil - } - } - - return nil, err -} - -func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) { - if stmt.mc.netConn == nil { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - // Send command - err := stmt.writeExecutePacket(args) - if err != nil { - return nil, err - } - - mc := stmt.mc - - // Read Result - resLen, err := mc.readResultSetHeaderPacket() - if err != nil { - return nil, err - } - - rows := new(binaryRows) - rows.mc = mc - - if resLen > 0 { - // Columns - // If not cached, read them and cache them - if stmt.columns == nil { - rows.columns, err = mc.readColumns(resLen) - stmt.columns = rows.columns - } else { - rows.columns = stmt.columns - err = mc.readUntilEOF() - } - } - - return rows, err -} - -type converter struct{} - -func (c converter) ConvertValue(v interface{}) (driver.Value, error) { - if driver.IsValue(v) { - return v, nil - } - - rv := reflect.ValueOf(v) - switch rv.Kind() { - case reflect.Ptr: - // indirect pointers - if rv.IsNil() { - return nil, nil - } - return c.ConvertValue(rv.Elem().Interface()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return rv.Int(), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: - return int64(rv.Uint()), nil - case reflect.Uint64: - u64 := rv.Uint() - if u64 >= 1<<63 { - return strconv.FormatUint(u64, 10), nil - } - return int64(u64), nil - case reflect.Float32, reflect.Float64: - return rv.Float(), nil - } - return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind()) -} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/transaction.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/transaction.go deleted file mode 100644 index 33c749b35..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/transaction.go +++ /dev/null @@ -1,31 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. 
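A subtlety in converter.ConvertValue above: driver.Value has no unsigned integer kind, so a uint64 argument with the top bit set cannot round-trip through int64 and is sent as its decimal string instead. A standalone sketch of just that rule (the converter type itself is unexported, so this mirrors the reflect.Uint64 branch rather than calling it):

package main

import (
	"fmt"
	"strconv"
)

// convertUint64 mirrors converter.ConvertValue's reflect.Uint64 branch.
func convertUint64(u64 uint64) interface{} {
	if u64 >= 1<<63 {
		// Does not fit in an int64; pass the decimal representation.
		return strconv.FormatUint(u64, 10)
	}
	return int64(u64)
}

func main() {
	small := convertUint64(42)
	big := convertUint64(1 << 63)
	fmt.Printf("%T %v\n", small, small) // int64 42
	fmt.Printf("%T %v\n", big, big)     // string 9223372036854775808
}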
- -package mysql - -type mysqlTx struct { - mc *mysqlConn -} - -func (tx *mysqlTx) Commit() (err error) { - if tx.mc == nil || tx.mc.netConn == nil { - return ErrInvalidConn - } - err = tx.mc.exec("COMMIT") - tx.mc = nil - return -} - -func (tx *mysqlTx) Rollback() (err error) { - if tx.mc == nil || tx.mc.netConn == nil { - return ErrInvalidConn - } - err = tx.mc.exec("ROLLBACK") - tx.mc = nil - return -} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils.go deleted file mode 100644 index 6a26ad129..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils.go +++ /dev/null @@ -1,973 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "crypto/sha1" - "crypto/tls" - "database/sql/driver" - "encoding/binary" - "errors" - "fmt" - "io" - "net" - "net/url" - "strings" - "time" -) - -var ( - tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs - - errInvalidDSNUnescaped = errors.New("Invalid DSN: Did you forget to escape a param value?") - errInvalidDSNAddr = errors.New("Invalid DSN: Network Address not terminated (missing closing brace)") - errInvalidDSNNoSlash = errors.New("Invalid DSN: Missing the slash separating the database name") - errInvalidDSNUnsafeCollation = errors.New("Invalid DSN: interpolateParams can be used with ascii, latin1, utf8 and utf8mb4 charset") -) - -func init() { - tlsConfigRegister = make(map[string]*tls.Config) -} - -// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open. -// Use the key as a value in the DSN where tls=value. -// -// rootCertPool := x509.NewCertPool() -// pem, err := ioutil.ReadFile("/path/ca-cert.pem") -// if err != nil { -// log.Fatal(err) -// } -// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { -// log.Fatal("Failed to append PEM.") -// } -// clientCert := make([]tls.Certificate, 0, 1) -// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem") -// if err != nil { -// log.Fatal(err) -// } -// clientCert = append(clientCert, certs) -// mysql.RegisterTLSConfig("custom", &tls.Config{ -// RootCAs: rootCertPool, -// Certificates: clientCert, -// }) -// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom") -// -func RegisterTLSConfig(key string, config *tls.Config) error { - if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" { - return fmt.Errorf("Key '%s' is reserved", key) - } - - tlsConfigRegister[key] = config - return nil -} - -// DeregisterTLSConfig removes the tls.Config associated with key. 
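The mysqlTx type earlier in this hunk shows how thin the transaction layer is: Commit and Rollback just send the literal COMMIT/ROLLBACK statements over the wire and nil out the connection reference so a finished transaction cannot be reused. A usage sketch through database/sql (the accounts table is hypothetical):

package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func transfer(db *sql.DB, from, to int64, amount int) error {
	tx, err := db.Begin() // the driver returns a mysqlTx under the hood
	if err != nil {
		return err
	}
	// Safe even after a successful Commit: database/sql reports the
	// transaction as done and the driver is not called again.
	defer tx.Rollback()

	if _, err := tx.Exec("UPDATE accounts SET balance = balance - ? WHERE id = ?", amount, from); err != nil {
		return err
	}
	if _, err := tx.Exec("UPDATE accounts SET balance = balance + ? WHERE id = ?", amount, to); err != nil {
		return err
	}
	return tx.Commit() // mysqlTx.Commit sends "COMMIT" via mc.exec
}

func main() {
	db, err := sql.Open("mysql", "user:password@/test")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := transfer(db, 1, 2, 100); err != nil {
		log.Fatal(err)
	}
}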
-func DeregisterTLSConfig(key string) { - delete(tlsConfigRegister, key) -} - -// parseDSN parses the DSN string to a config -func parseDSN(dsn string) (cfg *config, err error) { - // New config with some default values - cfg = &config{ - loc: time.UTC, - collation: defaultCollation, - } - - // [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN] - // Find the last '/' (since the password or the net addr might contain a '/') - foundSlash := false - for i := len(dsn) - 1; i >= 0; i-- { - if dsn[i] == '/' { - foundSlash = true - var j, k int - - // left part is empty if i <= 0 - if i > 0 { - // [username[:password]@][protocol[(address)]] - // Find the last '@' in dsn[:i] - for j = i; j >= 0; j-- { - if dsn[j] == '@' { - // username[:password] - // Find the first ':' in dsn[:j] - for k = 0; k < j; k++ { - if dsn[k] == ':' { - cfg.passwd = dsn[k+1 : j] - break - } - } - cfg.user = dsn[:k] - - break - } - } - - // [protocol[(address)]] - // Find the first '(' in dsn[j+1:i] - for k = j + 1; k < i; k++ { - if dsn[k] == '(' { - // dsn[i-1] must be == ')' if an address is specified - if dsn[i-1] != ')' { - if strings.ContainsRune(dsn[k+1:i], ')') { - return nil, errInvalidDSNUnescaped - } - return nil, errInvalidDSNAddr - } - cfg.addr = dsn[k+1 : i-1] - break - } - } - cfg.net = dsn[j+1 : k] - } - - // dbname[?param1=value1&...¶mN=valueN] - // Find the first '?' in dsn[i+1:] - for j = i + 1; j < len(dsn); j++ { - if dsn[j] == '?' { - if err = parseDSNParams(cfg, dsn[j+1:]); err != nil { - return - } - break - } - } - cfg.dbname = dsn[i+1 : j] - - break - } - } - - if !foundSlash && len(dsn) > 0 { - return nil, errInvalidDSNNoSlash - } - - if cfg.interpolateParams && unsafeCollations[cfg.collation] { - return nil, errInvalidDSNUnsafeCollation - } - - // Set default network if empty - if cfg.net == "" { - cfg.net = "tcp" - } - - // Set default address if empty - if cfg.addr == "" { - switch cfg.net { - case "tcp": - cfg.addr = "127.0.0.1:3306" - case "unix": - cfg.addr = "/tmp/mysql.sock" - default: - return nil, errors.New("Default addr for network '" + cfg.net + "' unknown") - } - - } - - return -} - -// parseDSNParams parses the DSN "query string" -// Values must be url.QueryEscape'ed -func parseDSNParams(cfg *config, params string) (err error) { - for _, v := range strings.Split(params, "&") { - param := strings.SplitN(v, "=", 2) - if len(param) != 2 { - continue - } - - // cfg params - switch value := param[1]; param[0] { - - // Enable client side placeholder substitution - case "interpolateParams": - var isBool bool - cfg.interpolateParams, isBool = readBool(value) - if !isBool { - return fmt.Errorf("Invalid Bool value: %s", value) - } - - // Disable INFILE whitelist / enable all files - case "allowAllFiles": - var isBool bool - cfg.allowAllFiles, isBool = readBool(value) - if !isBool { - return fmt.Errorf("Invalid Bool value: %s", value) - } - - // Use cleartext authentication mode (MySQL 5.5.10+) - case "allowCleartextPasswords": - var isBool bool - cfg.allowCleartextPasswords, isBool = readBool(value) - if !isBool { - return fmt.Errorf("Invalid Bool value: %s", value) - } - - // Use old authentication mode (pre MySQL 4.1) - case "allowOldPasswords": - var isBool bool - cfg.allowOldPasswords, isBool = readBool(value) - if !isBool { - return fmt.Errorf("Invalid Bool value: %s", value) - } - - // Switch "rowsAffected" mode - case "clientFoundRows": - var isBool bool - cfg.clientFoundRows, isBool = readBool(value) - if !isBool { - return fmt.Errorf("Invalid Bool value: %s", value) - 
} - - // Collation - case "collation": - collation, ok := collations[value] - if !ok { - // Note possibility for false negatives: - // could be triggered although the collation is valid if the - // collations map does not contain entries the server supports. - err = errors.New("unknown collation") - return - } - cfg.collation = collation - break - - case "columnsWithAlias": - var isBool bool - cfg.columnsWithAlias, isBool = readBool(value) - if !isBool { - return fmt.Errorf("Invalid Bool value: %s", value) - } - - // Time Location - case "loc": - if value, err = url.QueryUnescape(value); err != nil { - return - } - cfg.loc, err = time.LoadLocation(value) - if err != nil { - return - } - - // Dial Timeout - case "timeout": - cfg.timeout, err = time.ParseDuration(value) - if err != nil { - return - } - - // TLS-Encryption - case "tls": - boolValue, isBool := readBool(value) - if isBool { - if boolValue { - cfg.tls = &tls.Config{} - } - } else { - if strings.ToLower(value) == "skip-verify" { - cfg.tls = &tls.Config{InsecureSkipVerify: true} - } else if tlsConfig, ok := tlsConfigRegister[value]; ok { - if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify { - host, _, err := net.SplitHostPort(cfg.addr) - if err == nil { - tlsConfig.ServerName = host - } - } - - cfg.tls = tlsConfig - } else { - return fmt.Errorf("Invalid value / unknown config name: %s", value) - } - } - - default: - // lazy init - if cfg.params == nil { - cfg.params = make(map[string]string) - } - - if cfg.params[param[0]], err = url.QueryUnescape(value); err != nil { - return - } - } - } - - return -} - -// Returns the bool value of the input. -// The 2nd return value indicates if the input was a valid bool value -func readBool(input string) (value bool, valid bool) { - switch input { - case "1", "true", "TRUE", "True": - return true, true - case "0", "false", "FALSE", "False": - return false, true - } - - // Not a valid bool value - return -} - -/****************************************************************************** -* Authentication * -******************************************************************************/ - -// Encrypt password using 4.1+ method -func scramblePassword(scramble, password []byte) []byte { - if len(password) == 0 { - return nil - } - - // stage1Hash = SHA1(password) - crypt := sha1.New() - crypt.Write(password) - stage1 := crypt.Sum(nil) - - // scrambleHash = SHA1(scramble + SHA1(stage1Hash)) - // inner Hash - crypt.Reset() - crypt.Write(stage1) - hash := crypt.Sum(nil) - - // outer Hash - crypt.Reset() - crypt.Write(scramble) - crypt.Write(hash) - scramble = crypt.Sum(nil) - - // token = scrambleHash XOR stage1Hash - for i := range scramble { - scramble[i] ^= stage1[i] - } - return scramble -} - -// Encrypt password using pre 4.1 (old password) method -// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c -type myRnd struct { - seed1, seed2 uint32 -} - -const myRndMaxVal = 0x3FFFFFFF - -// Pseudo random number generator -func newMyRnd(seed1, seed2 uint32) *myRnd { - return &myRnd{ - seed1: seed1 % myRndMaxVal, - seed2: seed2 % myRndMaxVal, - } -} - -// Tested to be equivalent to MariaDB's floating point variant -// http://play.golang.org/p/QHvhd4qved -// http://play.golang.org/p/RG0q4ElWDx -func (r *myRnd) NextByte() byte { - r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal - r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal - - return byte(uint64(r.seed1) * 31 / myRndMaxVal) -} - -// Generate binary hash from byte string using insecure pre 4.1 method -func 
pwHash(password []byte) (result [2]uint32) { - var add uint32 = 7 - var tmp uint32 - - result[0] = 1345345333 - result[1] = 0x12345671 - - for _, c := range password { - // skip spaces and tabs in password - if c == ' ' || c == '\t' { - continue - } - - tmp = uint32(c) - result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8) - result[1] += (result[1] << 8) ^ result[0] - add += tmp - } - - // Remove sign bit (1<<31)-1) - result[0] &= 0x7FFFFFFF - result[1] &= 0x7FFFFFFF - - return -} - -// Encrypt password using insecure pre 4.1 method -func scrambleOldPassword(scramble, password []byte) []byte { - if len(password) == 0 { - return nil - } - - scramble = scramble[:8] - - hashPw := pwHash(password) - hashSc := pwHash(scramble) - - r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1]) - - var out [8]byte - for i := range out { - out[i] = r.NextByte() + 64 - } - - mask := r.NextByte() - for i := range out { - out[i] ^= mask - } - - return out[:] -} - -/****************************************************************************** -* Time related utils * -******************************************************************************/ - -// NullTime represents a time.Time that may be NULL. -// NullTime implements the Scanner interface so -// it can be used as a scan destination: -// -// var nt NullTime -// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt) -// ... -// if nt.Valid { -// // use nt.Time -// } else { -// // NULL value -// } -// -// This NullTime implementation is not driver-specific -type NullTime struct { - Time time.Time - Valid bool // Valid is true if Time is not NULL -} - -// Scan implements the Scanner interface. -// The value type must be time.Time or string / []byte (formatted time-string), -// otherwise Scan fails. -func (nt *NullTime) Scan(value interface{}) (err error) { - if value == nil { - nt.Time, nt.Valid = time.Time{}, false - return - } - - switch v := value.(type) { - case time.Time: - nt.Time, nt.Valid = v, true - return - case []byte: - nt.Time, err = parseDateTime(string(v), time.UTC) - nt.Valid = (err == nil) - return - case string: - nt.Time, err = parseDateTime(v, time.UTC) - nt.Valid = (err == nil) - return - } - - nt.Valid = false - return fmt.Errorf("Can't convert %T to time.Time", value) -} - -// Value implements the driver Valuer interface. 
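Both halves of NullTime are exported, so the Scan/Value round trip can be exercised directly, without a server. A short sketch:

package main

import (
	"fmt"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	var nt mysql.NullTime

	// Scan handles time.Time, string and []byte inputs itself,
	// so a textual DATETIME parses without any special DSN flags.
	if err := nt.Scan("2015-11-05 13:00:00"); err != nil {
		log.Fatal(err)
	}
	fmt.Println(nt.Valid, nt.Time) // true 2015-11-05 13:00:00 +0000 UTC

	// nil flips the value to the NULL state ...
	if err := nt.Scan(nil); err != nil {
		log.Fatal(err)
	}
	// ... and Value then reports NULL back to the driver as nil.
	v, _ := nt.Value()
	fmt.Println(nt.Valid, v) // false <nil>
}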
-func (nt NullTime) Value() (driver.Value, error) { - if !nt.Valid { - return nil, nil - } - return nt.Time, nil -} - -func parseDateTime(str string, loc *time.Location) (t time.Time, err error) { - base := "0000-00-00 00:00:00.0000000" - switch len(str) { - case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM" - if str == base[:len(str)] { - return - } - t, err = time.Parse(timeFormat[:len(str)], str) - default: - err = fmt.Errorf("Invalid Time-String: %s", str) - return - } - - // Adjust location - if err == nil && loc != time.UTC { - y, mo, d := t.Date() - h, mi, s := t.Clock() - t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil - } - - return -} - -func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) { - switch num { - case 0: - return time.Time{}, nil - case 4: - return time.Date( - int(binary.LittleEndian.Uint16(data[:2])), // year - time.Month(data[2]), // month - int(data[3]), // day - 0, 0, 0, 0, - loc, - ), nil - case 7: - return time.Date( - int(binary.LittleEndian.Uint16(data[:2])), // year - time.Month(data[2]), // month - int(data[3]), // day - int(data[4]), // hour - int(data[5]), // minutes - int(data[6]), // seconds - 0, - loc, - ), nil - case 11: - return time.Date( - int(binary.LittleEndian.Uint16(data[:2])), // year - time.Month(data[2]), // month - int(data[3]), // day - int(data[4]), // hour - int(data[5]), // minutes - int(data[6]), // seconds - int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds - loc, - ), nil - } - return nil, fmt.Errorf("Invalid DATETIME-packet length %d", num) -} - -// zeroDateTime is used in formatBinaryDateTime to avoid an allocation -// if the DATE or DATETIME has the zero value. -// It must never be changed. -// The current behavior depends on database/sql copying the result. 
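parseDateTime above gets away with a single code path because the reference layout is simply truncated to len(str): the same call parses DATE (length 10), DATETIME (19) and fractional-second DATETIME (21-26) strings, and the all-zero prefix is mapped to the zero time.Time rather than an error. A standalone sketch of the trick; the layout constant below is an assumption standing in for the driver's timeFormat, which is defined elsewhere in the package:

package main

import (
	"fmt"
	"time"
)

// Stand-in for the driver's timeFormat constant.
const timeFormat = "2006-01-02 15:04:05.999999"

func parse(str string) (time.Time, error) {
	// MySQL zero dates are valid values, not parse errors.
	const zero = "0000-00-00 00:00:00.000000"
	if str == zero[:len(str)] {
		return time.Time{}, nil
	}
	// Truncating the layout makes one format serve every length.
	return time.Parse(timeFormat[:len(str)], str)
}

func main() {
	for _, s := range []string{
		"2015-11-05",                 // DATE
		"2015-11-05 13:00:00",        // DATETIME
		"2015-11-05 13:00:00.123456", // DATETIME(6)
		"0000-00-00",                 // zero date
	} {
		t, err := parse(s)
		fmt.Println(t, err)
	}
}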
-var zeroDateTime = []byte("0000-00-00 00:00:00.000000") - -const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" -const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999" - -func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value, error) { - // length expects the deterministic length of the zero value, - // negative time and 100+ hours are automatically added if needed - if len(src) == 0 { - if justTime { - return zeroDateTime[11 : 11+length], nil - } - return zeroDateTime[:length], nil - } - var dst []byte // return value - var pt, p1, p2, p3 byte // current digit pair - var zOffs byte // offset of value in zeroDateTime - if justTime { - switch length { - case - 8, // time (can be up to 10 when negative and 100+ hours) - 10, 11, 12, 13, 14, 15: // time with fractional seconds - default: - return nil, fmt.Errorf("illegal TIME length %d", length) - } - switch len(src) { - case 8, 12: - default: - return nil, fmt.Errorf("Invalid TIME-packet length %d", len(src)) - } - // +2 to enable negative time and 100+ hours - dst = make([]byte, 0, length+2) - if src[0] == 1 { - dst = append(dst, '-') - } - if src[1] != 0 { - hour := uint16(src[1])*24 + uint16(src[5]) - pt = byte(hour / 100) - p1 = byte(hour - 100*uint16(pt)) - dst = append(dst, digits01[pt]) - } else { - p1 = src[5] - } - zOffs = 11 - src = src[6:] - } else { - switch length { - case 10, 19, 21, 22, 23, 24, 25, 26: - default: - t := "DATE" - if length > 10 { - t += "TIME" - } - return nil, fmt.Errorf("illegal %s length %d", t, length) - } - switch len(src) { - case 4, 7, 11: - default: - t := "DATE" - if length > 10 { - t += "TIME" - } - return nil, fmt.Errorf("illegal %s-packet length %d", t, len(src)) - } - dst = make([]byte, 0, length) - // start with the date - year := binary.LittleEndian.Uint16(src[:2]) - pt = byte(year / 100) - p1 = byte(year - 100*uint16(pt)) - p2, p3 = src[2], src[3] - dst = append(dst, - digits10[pt], digits01[pt], - digits10[p1], digits01[p1], '-', - digits10[p2], digits01[p2], '-', - digits10[p3], digits01[p3], - ) - if length == 10 { - return dst, nil - } - if len(src) == 4 { - return append(dst, zeroDateTime[10:length]...), nil - } - dst = append(dst, ' ') - p1 = src[4] // hour - src = src[5:] - } - // p1 is 2-digit hour, src is after hour - p2, p3 = src[0], src[1] - dst = append(dst, - digits10[p1], digits01[p1], ':', - digits10[p2], digits01[p2], ':', - digits10[p3], digits01[p3], - ) - if length <= byte(len(dst)) { - return dst, nil - } - src = src[2:] - if len(src) == 0 { - return append(dst, zeroDateTime[19:zOffs+length]...), nil - } - microsecs := binary.LittleEndian.Uint32(src[:4]) - p1 = byte(microsecs / 10000) - microsecs -= 10000 * uint32(p1) - p2 = byte(microsecs / 100) - microsecs -= 100 * uint32(p2) - p3 = byte(microsecs) - switch decimals := zOffs + length - 20; decimals { - default: - return append(dst, '.', - digits10[p1], digits01[p1], - digits10[p2], digits01[p2], - digits10[p3], digits01[p3], - ), nil - case 1: - return append(dst, '.', - digits10[p1], - ), nil - case 2: - return append(dst, '.', - digits10[p1], digits01[p1], - ), nil - case 3: - return append(dst, '.', - digits10[p1], digits01[p1], - digits10[p2], - ), nil - case 4: - return append(dst, '.', - digits10[p1], digits01[p1], - digits10[p2], digits01[p2], - ), nil - case 5: - return append(dst, '.', - digits10[p1], digits01[p1], - digits10[p2], digits01[p2], - digits10[p3], 
- ), nil - } -} - -/****************************************************************************** -* Convert from and to bytes * -******************************************************************************/ - -func uint64ToBytes(n uint64) []byte { - return []byte{ - byte(n), - byte(n >> 8), - byte(n >> 16), - byte(n >> 24), - byte(n >> 32), - byte(n >> 40), - byte(n >> 48), - byte(n >> 56), - } -} - -func uint64ToString(n uint64) []byte { - var a [20]byte - i := 20 - - // U+0030 = 0 - // ... - // U+0039 = 9 - - var q uint64 - for n >= 10 { - i-- - q = n / 10 - a[i] = uint8(n-q*10) + 0x30 - n = q - } - - i-- - a[i] = uint8(n) + 0x30 - - return a[i:] -} - -// treats string value as unsigned integer representation -func stringToInt(b []byte) int { - val := 0 - for i := range b { - val *= 10 - val += int(b[i] - 0x30) - } - return val -} - -// returns the string read as a bytes slice, whether the value is NULL, -// the number of bytes read and an error, in case the string is longer than -// the input slice -func readLengthEncodedString(b []byte) ([]byte, bool, int, error) { - // Get length - num, isNull, n := readLengthEncodedInteger(b) - if num < 1 { - return b[n:n], isNull, n, nil - } - - n += int(num) - - // Check data length - if len(b) >= n { - return b[n-int(num) : n], false, n, nil - } - return nil, false, n, io.EOF -} - -// returns the number of bytes skipped and an error, in case the string is -// longer than the input slice -func skipLengthEncodedString(b []byte) (int, error) { - // Get length - num, _, n := readLengthEncodedInteger(b) - if num < 1 { - return n, nil - } - - n += int(num) - - // Check data length - if len(b) >= n { - return n, nil - } - return n, io.EOF -} - -// returns the number read, whether the value is NULL and the number of bytes read -func readLengthEncodedInteger(b []byte) (uint64, bool, int) { - // See issue #349 - if len(b) == 0 { - return 0, true, 1 - } - switch b[0] { - - // 251: NULL - case 0xfb: - return 0, true, 1 - - // 252: value of following 2 - case 0xfc: - return uint64(b[1]) | uint64(b[2])<<8, false, 3 - - // 253: value of following 3 - case 0xfd: - return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4 - - // 254: value of following 8 - case 0xfe: - return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 | - uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 | - uint64(b[7])<<48 | uint64(b[8])<<56, - false, 9 - } - - // 0-250: value of first byte - return uint64(b[0]), false, 1 -} - -// encodes a uint64 value and appends it to the given bytes slice -func appendLengthEncodedInteger(b []byte, n uint64) []byte { - switch { - case n <= 250: - return append(b, byte(n)) - - case n <= 0xffff: - return append(b, 0xfc, byte(n), byte(n>>8)) - - case n <= 0xffffff: - return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16)) - } - return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24), - byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56)) -} - -// reserveBuffer checks cap(buf) and expands the buffer to len(buf) + appendSize. -// If cap(buf) is not enough, it reallocates a new buffer.
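The length-encoded integer helpers above implement MySQL's variable-width wire integer: one byte covers 0-250, while 0xfc, 0xfd and 0xfe prefix 2-, 3- and 8-byte little-endian payloads (0xfb is reserved for NULL). A standalone encoder sketch, mirroring the unexported appendLengthEncodedInteger; the printed bytes match the TestLengthEncodedInteger vectors further down in this diff:

package main

import "fmt"

// lenEncInt encodes n in MySQL's length-encoded integer format.
func lenEncInt(n uint64) []byte {
	switch {
	case n <= 250:
		return []byte{byte(n)}
	case n <= 0xffff:
		return []byte{0xfc, byte(n), byte(n >> 8)}
	case n <= 0xffffff:
		return []byte{0xfd, byte(n), byte(n >> 8), byte(n >> 16)}
	}
	return []byte{0xfe, byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24),
		byte(n >> 32), byte(n >> 40), byte(n >> 48), byte(n >> 56)}
}

func main() {
	fmt.Printf("% x\n", lenEncInt(0xfa))     // fa
	fmt.Printf("% x\n", lenEncInt(0x1234))   // fc 34 12
	fmt.Printf("% x\n", lenEncInt(0x123456)) // fd 56 34 12
	fmt.Printf("% x\n", lenEncInt(1<<24))    // fe 00 00 00 01 00 00 00 00
}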
-func reserveBuffer(buf []byte, appendSize int) []byte { - newSize := len(buf) + appendSize - if cap(buf) < newSize { - // Grow buffer exponentially - newBuf := make([]byte, len(buf)*2+appendSize) - copy(newBuf, buf) - buf = newBuf - } - return buf[:newSize] -} - -// escapeBytesBackslash escapes []byte with backslashes (\) -// This escapes the contents of a string (provided as []byte) by adding backslashes before special -// characters, and turning others into specific escape sequences, such as -// turning newlines into \n and null bytes into \0. -// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932 -func escapeBytesBackslash(buf, v []byte) []byte { - pos := len(buf) - buf = reserveBuffer(buf, len(v)*2) - - for _, c := range v { - switch c { - case '\x00': - buf[pos] = '\\' - buf[pos+1] = '0' - pos += 2 - case '\n': - buf[pos] = '\\' - buf[pos+1] = 'n' - pos += 2 - case '\r': - buf[pos] = '\\' - buf[pos+1] = 'r' - pos += 2 - case '\x1a': - buf[pos] = '\\' - buf[pos+1] = 'Z' - pos += 2 - case '\'': - buf[pos] = '\\' - buf[pos+1] = '\'' - pos += 2 - case '"': - buf[pos] = '\\' - buf[pos+1] = '"' - pos += 2 - case '\\': - buf[pos] = '\\' - buf[pos+1] = '\\' - pos += 2 - default: - buf[pos] = c - pos += 1 - } - } - - return buf[:pos] -} - -// escapeStringBackslash is similar to escapeBytesBackslash but for string. -func escapeStringBackslash(buf []byte, v string) []byte { - pos := len(buf) - buf = reserveBuffer(buf, len(v)*2) - - for i := 0; i < len(v); i++ { - c := v[i] - switch c { - case '\x00': - buf[pos] = '\\' - buf[pos+1] = '0' - pos += 2 - case '\n': - buf[pos] = '\\' - buf[pos+1] = 'n' - pos += 2 - case '\r': - buf[pos] = '\\' - buf[pos+1] = 'r' - pos += 2 - case '\x1a': - buf[pos] = '\\' - buf[pos+1] = 'Z' - pos += 2 - case '\'': - buf[pos] = '\\' - buf[pos+1] = '\'' - pos += 2 - case '"': - buf[pos] = '\\' - buf[pos+1] = '"' - pos += 2 - case '\\': - buf[pos] = '\\' - buf[pos+1] = '\\' - pos += 2 - default: - buf[pos] = c - pos += 1 - } - } - - return buf[:pos] -} - -// escapeBytesQuotes escapes apostrophes in []byte by doubling them up. -// This escapes the contents of a string by doubling up any apostrophes that -// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in -// effect on the server. -// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038 -func escapeBytesQuotes(buf, v []byte) []byte { - pos := len(buf) - buf = reserveBuffer(buf, len(v)*2) - - for _, c := range v { - if c == '\'' { - buf[pos] = '\'' - buf[pos+1] = '\'' - pos += 2 - } else { - buf[pos] = c - pos++ - } - } - - return buf[:pos] -} - -// escapeStringQuotes is similar to escapeBytesQuotes but for string. -func escapeStringQuotes(buf []byte, v string) []byte { - pos := len(buf) - buf = reserveBuffer(buf, len(v)*2) - - for i := 0; i < len(v); i++ { - c := v[i] - if c == '\'' { - buf[pos] = '\'' - buf[pos+1] = '\'' - pos += 2 - } else { - buf[pos] = c - pos++ - } - } - - return buf[:pos] -} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils_test.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils_test.go deleted file mode 100644 index 79fbdd1eb..000000000 --- a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils_test.go +++ /dev/null @@ -1,346 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. 
If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "bytes" - "crypto/tls" - "encoding/binary" - "fmt" - "testing" - "time" -) - -var testDSNs = []struct { - in string - out string - loc *time.Location -}{ - {"username:password@protocol(address)/dbname?param=value", "&{user:username passwd:password net:protocol addr:address dbname:dbname params:map[param:value] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, - {"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true", "&{user:username passwd:password net:protocol addr:address dbname:dbname params:map[param:value] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:true interpolateParams:false}", time.UTC}, - {"user@unix(/path/to/socket)/dbname?charset=utf8", "&{user:user passwd: net:unix addr:/path/to/socket dbname:dbname params:map[charset:utf8] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, - {"user:password@tcp(localhost:5555)/dbname?charset=utf8&tls=true", "&{user:user passwd:password net:tcp addr:localhost:5555 dbname:dbname params:map[charset:utf8] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, - {"user:password@tcp(localhost:5555)/dbname?charset=utf8mb4,utf8&tls=skip-verify", "&{user:user passwd:password net:tcp addr:localhost:5555 dbname:dbname params:map[charset:utf8mb4,utf8] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, - {"user:password@/dbname?loc=UTC&timeout=30s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE&collation=utf8mb4_unicode_ci", "&{user:user passwd:password net:tcp addr:127.0.0.1:3306 dbname:dbname params:map[] loc:%p tls: timeout:30000000000 collation:224 allowAllFiles:true allowOldPasswords:true allowCleartextPasswords:false clientFoundRows:true columnsWithAlias:false interpolateParams:false}", time.UTC}, - {"user:p@ss(word)@tcp([de:ad:be:ef::ca:fe]:80)/dbname?loc=Local", "&{user:user passwd:p@ss(word) net:tcp addr:[de:ad:be:ef::ca:fe]:80 dbname:dbname params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.Local}, - {"/dbname", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname:dbname params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, - {"@/", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, - {"/", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false 
allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, - {"", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, - {"user:p@/ssword@/", "&{user:user passwd:p@/ssword net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, - {"unix/?arg=%2Fsome%2Fpath.ext", "&{user: passwd: net:unix addr:/tmp/mysql.sock dbname: params:map[arg:/some/path.ext] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, -} - -func TestDSNParser(t *testing.T) { - var cfg *config - var err error - var res string - - for i, tst := range testDSNs { - cfg, err = parseDSN(tst.in) - if err != nil { - t.Error(err.Error()) - } - - // pointer not static - cfg.tls = nil - - res = fmt.Sprintf("%+v", cfg) - if res != fmt.Sprintf(tst.out, tst.loc) { - t.Errorf("%d. parseDSN(%q) => %q, want %q", i, tst.in, res, fmt.Sprintf(tst.out, tst.loc)) - } - } -} - -func TestDSNParserInvalid(t *testing.T) { - var invalidDSNs = []string{ - "@net(addr/", // no closing brace - "@tcp(/", // no closing brace - "tcp(/", // no closing brace - "(/", // no closing brace - "net(addr)//", // unescaped - "user:pass@tcp(1.2.3.4:3306)", // no trailing slash - //"/dbname?arg=/some/unescaped/path", - } - - for i, tst := range invalidDSNs { - if _, err := parseDSN(tst); err == nil { - t.Errorf("invalid DSN #%d. (%s) didn't error!", i, tst) - } - } -} - -func TestDSNWithCustomTLS(t *testing.T) { - baseDSN := "user:password@tcp(localhost:5555)/dbname?tls=" - tlsCfg := tls.Config{} - - RegisterTLSConfig("utils_test", &tlsCfg) - - // Custom TLS is missing - tst := baseDSN + "invalid_tls" - cfg, err := parseDSN(tst) - if err == nil { - t.Errorf("Invalid custom TLS in DSN (%s) but did not error. 
Got config: %#v", tst, cfg) - } - - tst = baseDSN + "utils_test" - - // Custom TLS with a server name - name := "foohost" - tlsCfg.ServerName = name - cfg, err = parseDSN(tst) - - if err != nil { - t.Error(err.Error()) - } else if cfg.tls.ServerName != name { - t.Errorf("Did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, tst) - } - - // Custom TLS without a server name - name = "localhost" - tlsCfg.ServerName = "" - cfg, err = parseDSN(tst) - - if err != nil { - t.Error(err.Error()) - } else if cfg.tls.ServerName != name { - t.Errorf("Did not get the correct ServerName (%s) parsing DSN (%s).", name, tst) - } - - DeregisterTLSConfig("utils_test") -} - -func TestDSNUnsafeCollation(t *testing.T) { - _, err := parseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=true") - if err != errInvalidDSNUnsafeCollation { - t.Errorf("Expected %v, Got %v", errInvalidDSNUnsafeCollation, err) - } - - _, err = parseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=false") - if err != nil { - t.Errorf("Expected %v, Got %v", nil, err) - } - - _, err = parseDSN("/dbname?collation=gbk_chinese_ci") - if err != nil { - t.Errorf("Expected %v, Got %v", nil, err) - } - - _, err = parseDSN("/dbname?collation=ascii_bin&interpolateParams=true") - if err != nil { - t.Errorf("Expected %v, Got %v", nil, err) - } - - _, err = parseDSN("/dbname?collation=latin1_german1_ci&interpolateParams=true") - if err != nil { - t.Errorf("Expected %v, Got %v", nil, err) - } - - _, err = parseDSN("/dbname?collation=utf8_general_ci&interpolateParams=true") - if err != nil { - t.Errorf("Expected %v, Got %v", nil, err) - } - - _, err = parseDSN("/dbname?collation=utf8mb4_general_ci&interpolateParams=true") - if err != nil { - t.Errorf("Expected %v, Got %v", nil, err) - } -} - -func BenchmarkParseDSN(b *testing.B) { - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - for _, tst := range testDSNs { - if _, err := parseDSN(tst.in); err != nil { - b.Error(err.Error()) - } - } - } -} - -func TestScanNullTime(t *testing.T) { - var scanTests = []struct { - in interface{} - error bool - valid bool - time time.Time - }{ - {tDate, false, true, tDate}, - {sDate, false, true, tDate}, - {[]byte(sDate), false, true, tDate}, - {tDateTime, false, true, tDateTime}, - {sDateTime, false, true, tDateTime}, - {[]byte(sDateTime), false, true, tDateTime}, - {tDate0, false, true, tDate0}, - {sDate0, false, true, tDate0}, - {[]byte(sDate0), false, true, tDate0}, - {sDateTime0, false, true, tDate0}, - {[]byte(sDateTime0), false, true, tDate0}, - {"", true, false, tDate0}, - {"1234", true, false, tDate0}, - {0, true, false, tDate0}, - } - - var nt = NullTime{} - var err error - - for _, tst := range scanTests { - err = nt.Scan(tst.in) - if (err != nil) != tst.error { - t.Errorf("%v: expected error status %t, got %t", tst.in, tst.error, (err != nil)) - } - if nt.Valid != tst.valid { - t.Errorf("%v: expected valid status %t, got %t", tst.in, tst.valid, nt.Valid) - } - if nt.Time != tst.time { - t.Errorf("%v: expected time %v, got %v", tst.in, tst.time, nt.Time) - } - } -} - -func TestLengthEncodedInteger(t *testing.T) { - var integerTests = []struct { - num uint64 - encoded []byte - }{ - {0x0000000000000000, []byte{0x00}}, - {0x0000000000000012, []byte{0x12}}, - {0x00000000000000fa, []byte{0xfa}}, - {0x0000000000000100, []byte{0xfc, 0x00, 0x01}}, - {0x0000000000001234, []byte{0xfc, 0x34, 0x12}}, - {0x000000000000ffff, []byte{0xfc, 0xff, 0xff}}, - {0x0000000000010000, []byte{0xfd, 0x00, 0x00, 0x01}}, - {0x0000000000123456, []byte{0xfd, 0x56,
0x34, 0x12}}, - {0x0000000000ffffff, []byte{0xfd, 0xff, 0xff, 0xff}}, - {0x0000000001000000, []byte{0xfe, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}}, - {0x123456789abcdef0, []byte{0xfe, 0xf0, 0xde, 0xbc, 0x9a, 0x78, 0x56, 0x34, 0x12}}, - {0xffffffffffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}, - } - - for _, tst := range integerTests { - num, isNull, numLen := readLengthEncodedInteger(tst.encoded) - if isNull { - t.Errorf("%x: expected %d, got NULL", tst.encoded, tst.num) - } - if num != tst.num { - t.Errorf("%x: expected %d, got %d", tst.encoded, tst.num, num) - } - if numLen != len(tst.encoded) { - t.Errorf("%x: expected size %d, got %d", tst.encoded, len(tst.encoded), numLen) - } - encoded := appendLengthEncodedInteger(nil, num) - if !bytes.Equal(encoded, tst.encoded) { - t.Errorf("%v: expected %x, got %x", num, tst.encoded, encoded) - } - } -} - -func TestOldPass(t *testing.T) { - scramble := []byte{9, 8, 7, 6, 5, 4, 3, 2} - vectors := []struct { - pass string - out string - }{ - {" pass", "47575c5a435b4251"}, - {"pass ", "47575c5a435b4251"}, - {"123\t456", "575c47505b5b5559"}, - {"C0mpl!ca ted#PASS123", "5d5d554849584a45"}, - } - for _, tuple := range vectors { - ours := scrambleOldPassword(scramble, []byte(tuple.pass)) - if tuple.out != fmt.Sprintf("%x", ours) { - t.Errorf("Failed old password %q", tuple.pass) - } - } -} - -func TestFormatBinaryDateTime(t *testing.T) { - rawDate := [11]byte{} - binary.LittleEndian.PutUint16(rawDate[:2], 1978) // years - rawDate[2] = 12 // months - rawDate[3] = 30 // days - rawDate[4] = 15 // hours - rawDate[5] = 46 // minutes - rawDate[6] = 23 // seconds - binary.LittleEndian.PutUint32(rawDate[7:], 987654) // microseconds - expect := func(expected string, inlen, outlen uint8) { - actual, _ := formatBinaryDateTime(rawDate[:inlen], outlen, false) - bytes, ok := actual.([]byte) - if !ok { - t.Errorf("formatBinaryDateTime must return []byte, was %T", actual) - } - if string(bytes) != expected { - t.Errorf( - "expected %q, got %q for length in %d, out %d", - bytes, actual, inlen, outlen, - ) - } - } - expect("0000-00-00", 0, 10) - expect("0000-00-00 00:00:00", 0, 19) - expect("1978-12-30", 4, 10) - expect("1978-12-30 15:46:23", 7, 19) - expect("1978-12-30 15:46:23.987654", 11, 26) -} - -func TestEscapeBackslash(t *testing.T) { - expect := func(expected, value string) { - actual := string(escapeBytesBackslash([]byte{}, []byte(value))) - if actual != expected { - t.Errorf( - "expected %s, got %s", - expected, actual, - ) - } - - actual = string(escapeStringBackslash([]byte{}, value)) - if actual != expected { - t.Errorf( - "expected %s, got %s", - expected, actual, - ) - } - } - - expect("foo\\0bar", "foo\x00bar") - expect("foo\\nbar", "foo\nbar") - expect("foo\\rbar", "foo\rbar") - expect("foo\\Zbar", "foo\x1abar") - expect("foo\\\"bar", "foo\"bar") - expect("foo\\\\bar", "foo\\bar") - expect("foo\\'bar", "foo'bar") -} - -func TestEscapeQuotes(t *testing.T) { - expect := func(expected, value string) { - actual := string(escapeBytesQuotes([]byte{}, []byte(value))) - if actual != expected { - t.Errorf( - "expected %s, got %s", - expected, actual, - ) - } - - actual = string(escapeStringQuotes([]byte{}, value)) - if actual != expected { - t.Errorf( - "expected %s, got %s", - expected, actual, - ) - } - } - - expect("foo\x00bar", "foo\x00bar") // not affected - expect("foo\nbar", "foo\nbar") // not affected - expect("foo\rbar", "foo\rbar") // not affected - expect("foo\x1abar", "foo\x1abar") // not affected - expect("foo''bar", 
"foo'bar") // affected - expect("foo\"bar", "foo\"bar") // not affected -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/Makefile b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/Makefile deleted file mode 100644 index 23a6b1734..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C testdata - protoc-min-version --version="3.0.0" --proto_path=.:../../../../ --gogo_out=. proto3_proto/proto3.proto - make diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/all_test.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/all_test.go deleted file mode 100644 index 88c506cf0..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/all_test.go +++ /dev/null @@ -1,2104 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "math" - "math/rand" - "reflect" - "runtime/debug" - "strings" - "testing" - "time" - - . "github.com/gogo/protobuf/proto" - . "github.com/gogo/protobuf/proto/testdata" -) - -var globalO *Buffer - -func old() *Buffer { - if globalO == nil { - globalO = NewBuffer(nil) - } - globalO.Reset() - return globalO -} - -func equalbytes(b1, b2 []byte, t *testing.T) { - if len(b1) != len(b2) { - t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2)) - return - } - for i := 0; i < len(b1); i++ { - if b1[i] != b2[i] { - t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2) - } - } -} - -func initGoTestField() *GoTestField { - f := new(GoTestField) - f.Label = String("label") - f.Type = String("type") - return f -} - -// These are all structurally equivalent but the tag numbers differ. -// (It's remarkable that required, optional, and repeated all have -// 8 letters.) -func initGoTest_RequiredGroup() *GoTest_RequiredGroup { - return &GoTest_RequiredGroup{ - RequiredField: String("required"), - } -} - -func initGoTest_OptionalGroup() *GoTest_OptionalGroup { - return &GoTest_OptionalGroup{ - RequiredField: String("optional"), - } -} - -func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { - return &GoTest_RepeatedGroup{ - RequiredField: String("repeated"), - } -} - -func initGoTest(setdefaults bool) *GoTest { - pb := new(GoTest) - if setdefaults { - pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) - pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) - pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) - pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) - pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) - pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) - pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) - pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) - pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) - pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) - pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted - pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) - pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) - } - - pb.Kind = GoTest_TIME.Enum() - pb.RequiredField = initGoTestField() - pb.F_BoolRequired = Bool(true) - pb.F_Int32Required = Int32(3) - pb.F_Int64Required = Int64(6) - pb.F_Fixed32Required = Uint32(32) - pb.F_Fixed64Required = Uint64(64) - pb.F_Uint32Required = Uint32(3232) - pb.F_Uint64Required = Uint64(6464) - pb.F_FloatRequired = Float32(3232) - pb.F_DoubleRequired = Float64(6464) - pb.F_StringRequired = String("string") - pb.F_BytesRequired = 
[]byte("bytes") - pb.F_Sint32Required = Int32(-32) - pb.F_Sint64Required = Int64(-64) - pb.Requiredgroup = initGoTest_RequiredGroup() - - return pb -} - -func fail(msg string, b *bytes.Buffer, s string, t *testing.T) { - data := b.Bytes() - ld := len(data) - ls := len(s) / 2 - - fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls) - - // find the interesting spot - n - n := ls - if ld < ls { - n = ld - } - j := 0 - for i := 0; i < n; i++ { - bs := hex(s[j])*16 + hex(s[j+1]) - j += 2 - if data[i] == bs { - continue - } - n = i - break - } - l := n - 10 - if l < 0 { - l = 0 - } - h := n + 10 - - // find the interesting spot - n - fmt.Printf("is[%d]:", l) - for i := l; i < h; i++ { - if i >= ld { - fmt.Printf(" --") - continue - } - fmt.Printf(" %.2x", data[i]) - } - fmt.Printf("\n") - - fmt.Printf("sb[%d]:", l) - for i := l; i < h; i++ { - if i >= ls { - fmt.Printf(" --") - continue - } - bs := hex(s[j])*16 + hex(s[j+1]) - j += 2 - fmt.Printf(" %.2x", bs) - } - fmt.Printf("\n") - - t.Fail() - - // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes()) - // Print the output in a partially-decoded format; can - // be helpful when updating the test. It produces the output - // that is pasted, with minor edits, into the argument to verify(). - // data := b.Bytes() - // nesting := 0 - // for b.Len() > 0 { - // start := len(data) - b.Len() - // var u uint64 - // u, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on varint:", err) - // return - // } - // wire := u & 0x7 - // tag := u >> 3 - // switch wire { - // case WireVarint: - // v, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on varint:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireFixed32: - // v, err := DecodeFixed32(b) - // if err != nil { - // fmt.Printf("decode error on fixed32:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireFixed64: - // v, err := DecodeFixed64(b) - // if err != nil { - // fmt.Printf("decode error on fixed64:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireBytes: - // nb, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on bytes:", err) - // return - // } - // after_tag := len(data) - b.Len() - // str := make([]byte, nb) - // _, err = b.Read(str) - // if err != nil { - // fmt.Printf("decode error on bytes:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n", - // data[start:after_tag], str, tag, wire) - // case WireStartGroup: - // nesting++ - // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n", - // data[start:len(data)-b.Len()], tag, nesting) - // case WireEndGroup: - // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n", - // data[start:len(data)-b.Len()], tag, nesting) - // nesting-- - // default: - // fmt.Printf("unrecognized wire type %d\n", wire) - // return - // } - // } -} - -func hex(c uint8) uint8 { - if '0' <= c && c <= '9' { - return c - '0' - } - if 'a' <= c && c <= 'f' { - return 10 + c - 'a' - } - if 'A' <= c && c <= 'F' { - return 10 + c - 'A' - } - return 0 -} - -func equal(b []byte, s string, t *testing.T) bool { - if 2*len(b) != len(s) { - // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t) - fmt.Printf("wrong 
lengths: 2*%d != %d\n", len(b), len(s)) - return false - } - for i, j := 0, 0; i < len(b); i, j = i+1, j+2 { - x := hex(s[j])*16 + hex(s[j+1]) - if b[i] != x { - // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t) - fmt.Printf("bad byte[%d]:%x %x", i, b[i], x) - return false - } - } - return true -} - -func overify(t *testing.T, pb *GoTest, expected string) { - o := old() - err := o.Marshal(pb) - if err != nil { - fmt.Printf("overify marshal-1 err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("expected = %s", expected) - } - if !equal(o.Bytes(), expected, t) { - o.DebugPrint("overify neq 1", o.Bytes()) - t.Fatalf("expected = %s", expected) - } - - // Now test Unmarshal by recreating the original buffer. - pbd := new(GoTest) - err = o.Unmarshal(pbd) - if err != nil { - t.Fatalf("overify unmarshal err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("string = %s", expected) - } - o.Reset() - err = o.Marshal(pbd) - if err != nil { - t.Errorf("overify marshal-2 err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("string = %s", expected) - } - if !equal(o.Bytes(), expected, t) { - o.DebugPrint("overify neq 2", o.Bytes()) - t.Fatalf("string = %s", expected) - } -} - -// Simple tests for numeric encode/decode primitives (varint, etc.) -func TestNumericPrimitives(t *testing.T) { - for i := uint64(0); i < 1e6; i += 111 { - o := old() - if o.EncodeVarint(i) != nil { - t.Error("EncodeVarint") - break - } - x, e := o.DecodeVarint() - if e != nil { - t.Fatal("DecodeVarint") - } - if x != i { - t.Fatal("varint decode fail:", i, x) - } - - o = old() - if o.EncodeFixed32(i) != nil { - t.Fatal("encFixed32") - } - x, e = o.DecodeFixed32() - if e != nil { - t.Fatal("decFixed32") - } - if x != i { - t.Fatal("fixed32 decode fail:", i, x) - } - - o = old() - if o.EncodeFixed64(i*1234567) != nil { - t.Error("encFixed64") - break - } - x, e = o.DecodeFixed64() - if e != nil { - t.Error("decFixed64") - break - } - if x != i*1234567 { - t.Error("fixed64 decode fail:", i*1234567, x) - break - } - - o = old() - i32 := int32(i - 12345) - if o.EncodeZigzag32(uint64(i32)) != nil { - t.Fatal("EncodeZigzag32") - } - x, e = o.DecodeZigzag32() - if e != nil { - t.Fatal("DecodeZigzag32") - } - if x != uint64(uint32(i32)) { - t.Fatal("zigzag32 decode fail:", i32, x) - } - - o = old() - i64 := int64(i - 12345) - if o.EncodeZigzag64(uint64(i64)) != nil { - t.Fatal("EncodeZigzag64") - } - x, e = o.DecodeZigzag64() - if e != nil { - t.Fatal("DecodeZigzag64") - } - if x != uint64(i64) { - t.Fatal("zigzag64 decode fail:", i64, x) - } - } -} - -// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces. -type fakeMarshaler struct { - b []byte - err error -} - -func (f *fakeMarshaler) Marshal() ([]byte, error) { return f.b, f.err } -func (f *fakeMarshaler) String() string { return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) } -func (f *fakeMarshaler) ProtoMessage() {} -func (f *fakeMarshaler) Reset() {} - -type msgWithFakeMarshaler struct { - M *fakeMarshaler `protobuf:"bytes,1,opt,name=fake"` -} - -func (m *msgWithFakeMarshaler) String() string { return CompactTextString(m) } -func (m *msgWithFakeMarshaler) ProtoMessage() {} -func (m *msgWithFakeMarshaler) Reset() {} - -// Simple tests for proto messages that implement the Marshaler interface. 
-func TestMarshalerEncoding(t *testing.T) { - tests := []struct { - name string - m Message - want []byte - wantErr error - }{ - { - name: "Marshaler that fails", - m: &fakeMarshaler{ - err: errors.New("some marshal err"), - b: []byte{5, 6, 7}, - }, - // Since there's an error, nothing should be written to buffer. - want: nil, - wantErr: errors.New("some marshal err"), - }, - { - name: "Marshaler that fails with RequiredNotSetError", - m: &msgWithFakeMarshaler{ - M: &fakeMarshaler{ - err: &RequiredNotSetError{}, - b: []byte{5, 6, 7}, - }, - }, - // Since there's an error that can be continued after, - // the buffer should be written. - want: []byte{ - 10, 3, // for &msgWithFakeMarshaler - 5, 6, 7, // for &fakeMarshaler - }, - wantErr: &RequiredNotSetError{}, - }, - { - name: "Marshaler that succeeds", - m: &fakeMarshaler{ - b: []byte{0, 1, 2, 3, 4, 127, 255}, - }, - want: []byte{0, 1, 2, 3, 4, 127, 255}, - wantErr: nil, - }, - } - for _, test := range tests { - b := NewBuffer(nil) - err := b.Marshal(test.m) - if _, ok := err.(*RequiredNotSetError); ok { - // We're not in package proto, so we can only assert the type in this case. - err = &RequiredNotSetError{} - } - if !reflect.DeepEqual(test.wantErr, err) { - t.Errorf("%s: got err %v wanted %v", test.name, err, test.wantErr) - } - if !reflect.DeepEqual(test.want, b.Bytes()) { - t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want) - } - } -} - -// Simple tests for bytes -func TestBytesPrimitives(t *testing.T) { - o := old() - bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'} - if o.EncodeRawBytes(bytes) != nil { - t.Error("EncodeRawBytes") - } - decb, e := o.DecodeRawBytes(false) - if e != nil { - t.Error("DecodeRawBytes") - } - equalbytes(bytes, decb, t) -} - -// Simple tests for strings -func TestStringPrimitives(t *testing.T) { - o := old() - s := "now is the time" - if o.EncodeStringBytes(s) != nil { - t.Error("enc_string") - } - decs, e := o.DecodeStringBytes() - if e != nil { - t.Error("dec_string") - } - if s != decs { - t.Error("string encode/decode fail:", s, decs) - } -} - -// Do we catch the "required bit not set" case? -func TestRequiredBit(t *testing.T) { - o := old() - pb := new(GoTest) - err := o.Marshal(pb) - if err == nil { - t.Error("did not catch missing required fields") - } else if strings.Index(err.Error(), "Kind") < 0 { - t.Error("wrong error type:", err) - } -} - -// Check that all fields are nil. -// Clearly silly, and a residue from a more interesting test with an earlier, -// different initialization property, but it once caught a compiler bug so -// it lives. 
-func checkInitialized(pb *GoTest, t *testing.T) { - if pb.F_BoolDefaulted != nil { - t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted) - } - if pb.F_Int32Defaulted != nil { - t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted) - } - if pb.F_Int64Defaulted != nil { - t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted) - } - if pb.F_Fixed32Defaulted != nil { - t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted) - } - if pb.F_Fixed64Defaulted != nil { - t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted) - } - if pb.F_Uint32Defaulted != nil { - t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted) - } - if pb.F_Uint64Defaulted != nil { - t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted) - } - if pb.F_FloatDefaulted != nil { - t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted) - } - if pb.F_DoubleDefaulted != nil { - t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted) - } - if pb.F_StringDefaulted != nil { - t.Error("New or Reset did not set string:", *pb.F_StringDefaulted) - } - if pb.F_BytesDefaulted != nil { - t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted)) - } - if pb.F_Sint32Defaulted != nil { - t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted) - } - if pb.F_Sint64Defaulted != nil { - t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted) - } -} - -// Does Reset() reset? -func TestReset(t *testing.T) { - pb := initGoTest(true) - // muck with some values - pb.F_BoolDefaulted = Bool(false) - pb.F_Int32Defaulted = Int32(237) - pb.F_Int64Defaulted = Int64(12346) - pb.F_Fixed32Defaulted = Uint32(32000) - pb.F_Fixed64Defaulted = Uint64(666) - pb.F_Uint32Defaulted = Uint32(323232) - pb.F_Uint64Defaulted = nil - pb.F_FloatDefaulted = nil - pb.F_DoubleDefaulted = Float64(0) - pb.F_StringDefaulted = String("gotcha") - pb.F_BytesDefaulted = []byte("asdfasdf") - pb.F_Sint32Defaulted = Int32(123) - pb.F_Sint64Defaulted = Int64(789) - pb.Reset() - checkInitialized(pb, t) -} - -// All required fields set, no defaults provided. -func TestEncodeDecode1(t *testing.T) { - pb := initGoTest(false) - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 0x20 - "714000000000000000"+ // field 14, encoding 1, value 0x40 - "78a019"+ // field 15, encoding 0, value 0xca0 = 3232 - "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string" - "b304"+ // field 70, encoding 3, start group - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // field 70, encoding 4, end group - "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f") // field 103, encoding 0, 0x7f zigzag64 -} - -// All required fields set, defaults provided. 
-func TestEncodeDecode2(t *testing.T) { - pb := initGoTest(true) - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All default fields set to their default value by hand -func TestEncodeDecode3(t *testing.T) { - pb := initGoTest(false) - pb.F_BoolDefaulted = Bool(true) - pb.F_Int32Defaulted = Int32(32) - pb.F_Int64Defaulted = Int64(64) - pb.F_Fixed32Defaulted = Uint32(320) - pb.F_Fixed64Defaulted = Uint64(640) - pb.F_Uint32Defaulted = Uint32(3200) - pb.F_Uint64Defaulted = Uint64(6400) - pb.F_FloatDefaulted = Float32(314159) - pb.F_DoubleDefaulted = Float64(271828) - pb.F_StringDefaulted = String("hello, \"world!\"\n") - pb.F_BytesDefaulted = []byte("Bignose") - pb.F_Sint32Defaulted = Int32(-32) - pb.F_Sint64Defaulted = Int64(-64) - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 
271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, defaults provided, all non-defaulted optional fields have values. -func TestEncodeDecode4(t *testing.T) { - pb := initGoTest(true) - pb.Table = String("hello") - pb.Param = Int32(7) - pb.OptionalField = initGoTestField() - pb.F_BoolOptional = Bool(true) - pb.F_Int32Optional = Int32(32) - pb.F_Int64Optional = Int64(64) - pb.F_Fixed32Optional = Uint32(3232) - pb.F_Fixed64Optional = Uint64(6464) - pb.F_Uint32Optional = Uint32(323232) - pb.F_Uint64Optional = Uint64(646464) - pb.F_FloatOptional = Float32(32.) - pb.F_DoubleOptional = Float64(64.) - pb.F_StringOptional = String("hello") - pb.F_BytesOptional = []byte("Bignose") - pb.F_Sint32Optional = Int32(-32) - pb.F_Sint64Optional = Int64(-64) - pb.Optionalgroup = initGoTest_OptionalGroup() - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello" - "1807"+ // field 3, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "f00101"+ // field 30, encoding 0, value 1 - "f80120"+ // field 31, encoding 0, value 32 - "800240"+ // field 32, encoding 0, value 64 - "8d02a00c0000"+ // field 33, encoding 5, value 3232 - "91024019000000000000"+ // field 34, encoding 1, value 6464 - "9802a0dd13"+ // field 35, encoding 0, value 323232 - "a002c0ba27"+ // field 36, encoding 0, value 646464 - "ad0200000042"+ // field 37, encoding 5, value 32.0 - "b1020000000000005040"+ // field 38, encoding 1, value 64.0 - "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "d305"+ // start group field 90 level 1 - 
"da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional" - "d405"+ // end group field 90 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose" - "f0123f"+ // field 302, encoding 0, value 63 - "f8127f"+ // field 303, encoding 0, value 127 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, defaults provided, all repeated fields given two values. -func TestEncodeDecode5(t *testing.T) { - pb := initGoTest(true) - pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()} - pb.F_BoolRepeated = []bool{false, true} - pb.F_Int32Repeated = []int32{32, 33} - pb.F_Int64Repeated = []int64{64, 65} - pb.F_Fixed32Repeated = []uint32{3232, 3333} - pb.F_Fixed64Repeated = []uint64{6464, 6565} - pb.F_Uint32Repeated = []uint32{323232, 333333} - pb.F_Uint64Repeated = []uint64{646464, 656565} - pb.F_FloatRepeated = []float32{32., 33.} - pb.F_DoubleRepeated = []float64{64., 65.} - pb.F_StringRepeated = []string{"hello", "sailor"} - pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")} - pb.F_Sint32Repeated = []int32{32, -32} - pb.F_Sint64Repeated = []int64{64, -64} - pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()} - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) - "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "a00100"+ // field 20, encoding 0, value 0 - "a00101"+ // field 20, encoding 0, value 1 - "a80120"+ // field 21, encoding 0, value 32 - "a80121"+ // field 21, encoding 0, value 33 - "b00140"+ // field 22, encoding 0, value 64 - "b00141"+ // field 22, encoding 0, value 65 - "bd01a00c0000"+ // field 23, encoding 5, value 3232 - "bd01050d0000"+ // field 23, encoding 5, value 3333 - "c1014019000000000000"+ // field 24, encoding 1, value 6464 - "c101a519000000000000"+ // field 24, encoding 1, value 6565 - "c801a0dd13"+ // field 25, encoding 0, value 323232 - "c80195ac14"+ // field 25, encoding 0, value 333333 - "d001c0ba27"+ // field 26, encoding 0, value 646464 - "d001b58928"+ // field 26, encoding 0, value 656565 - "dd0100000042"+ // field 27, encoding 5, value 32.0 - "dd0100000442"+ // field 27, encoding 5, value 33.0 - "e1010000000000005040"+ // field 28, encoding 1, value 64.0 - "e1010000000000405040"+ // field 28, encoding 1, value 65.0 - "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello" - "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 
42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "8305"+ // start group field 80 level 1 - "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" - "8405"+ // end group field 80 level 1 - "8305"+ // start group field 80 level 1 - "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" - "8405"+ // end group field 80 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "ca0c03"+"626967"+ // field 201, encoding 2, string "big" - "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose" - "d00c40"+ // field 202, encoding 0, value 32 - "d00c3f"+ // field 202, encoding 0, value -32 - "d80c8001"+ // field 203, encoding 0, value 64 - "d80c7f"+ // field 203, encoding 0, value -64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, all packed repeated fields given two values. -func TestEncodeDecode6(t *testing.T) { - pb := initGoTest(false) - pb.F_BoolRepeatedPacked = []bool{false, true} - pb.F_Int32RepeatedPacked = []int32{32, 33} - pb.F_Int64RepeatedPacked = []int64{64, 65} - pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333} - pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565} - pb.F_Uint32RepeatedPacked = []uint32{323232, 333333} - pb.F_Uint64RepeatedPacked = []uint64{646464, 656565} - pb.F_FloatRepeatedPacked = []float32{32., 33.} - pb.F_DoubleRepeatedPacked = []float64{64., 65.} - pb.F_Sint32RepeatedPacked = []int32{32, -32} - pb.F_Sint64RepeatedPacked = []int64{64, -64} - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1 - "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33 - "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65 - "aa0308"+ // field 53, encoding 2, 8 bytes - "a00c0000050d0000"+ // value 3232, value 3333 - "b20310"+ // field 54, encoding 2, 16 bytes - "4019000000000000a519000000000000"+ // value 6464, value 6565 - "ba0306"+ // field 55, encoding 2, 6 bytes - "a0dd1395ac14"+ // value 323232, value 333333 - "c20306"+ // field 56, encoding 2, 6 bytes - "c0ba27b58928"+ // value 646464, value 656565 - "ca0308"+ // field 57, 
encoding 2, 8 bytes - "0000004200000442"+ // value 32.0, value 33.0 - "d20310"+ // field 58, encoding 2, 16 bytes - "00000000000050400000000000405040"+ // value 64.0, value 65.0 - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "b21f02"+ // field 502, encoding 2, 2 bytes - "403f"+ // value 32, value -32 - "ba1f03"+ // field 503, encoding 2, 3 bytes - "80017f") // value 64, value -64 -} - -// Test that we can encode empty bytes fields. -func TestEncodeDecodeBytes1(t *testing.T) { - pb := initGoTest(false) - - // Create our bytes - pb.F_BytesRequired = []byte{} - pb.F_BytesRepeated = [][]byte{{}} - pb.F_BytesOptional = []byte{} - - d, err := Marshal(pb) - if err != nil { - t.Error(err) - } - - pbd := new(GoTest) - if err := Unmarshal(d, pbd); err != nil { - t.Error(err) - } - - if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 { - t.Error("required empty bytes field is incorrect") - } - if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil { - t.Error("repeated empty bytes field is incorrect") - } - if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 { - t.Error("optional empty bytes field is incorrect") - } -} - -// Test that we encode nil-valued fields of a repeated bytes field correctly. -// Since entries in a repeated field cannot be nil, nil must mean empty value. -func TestEncodeDecodeBytes2(t *testing.T) { - pb := initGoTest(false) - - // Create our bytes - pb.F_BytesRepeated = [][]byte{nil} - - d, err := Marshal(pb) - if err != nil { - t.Error(err) - } - - pbd := new(GoTest) - if err := Unmarshal(d, pbd); err != nil { - t.Error(err) - } - - if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil { - t.Error("Unexpected value for repeated bytes field") - } -} - -// All required fields set, defaults provided, all repeated fields given two values. -func TestSkippingUnrecognizedFields(t *testing.T) { - o := old() - pb := initGoTestField() - - // Marshal it normally. - o.Marshal(pb) - - // Now new a GoSkipTest record. - skip := &GoSkipTest{ - SkipInt32: Int32(32), - SkipFixed32: Uint32(3232), - SkipFixed64: Uint64(6464), - SkipString: String("skipper"), - Skipgroup: &GoSkipTest_SkipGroup{ - GroupInt32: Int32(75), - GroupString: String("wxyz"), - }, - } - - // Marshal it into same buffer. - o.Marshal(skip) - - pbd := new(GoTestField) - o.Unmarshal(pbd) - - // The __unrecognized field should be a marshaling of GoSkipTest - skipd := new(GoSkipTest) - - o.SetBuf(pbd.XXX_unrecognized) - o.Unmarshal(skipd) - - if *skipd.SkipInt32 != *skip.SkipInt32 { - t.Error("skip int32", skipd.SkipInt32) - } - if *skipd.SkipFixed32 != *skip.SkipFixed32 { - t.Error("skip fixed32", skipd.SkipFixed32) - } - if *skipd.SkipFixed64 != *skip.SkipFixed64 { - t.Error("skip fixed64", skipd.SkipFixed64) - } - if *skipd.SkipString != *skip.SkipString { - t.Error("skip string", *skipd.SkipString) - } - if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 { - t.Error("skip group int32", skipd.Skipgroup.GroupInt32) - } - if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString { - t.Error("skip group string", *skipd.Skipgroup.GroupString) - } -} - -// Check that unrecognized fields of a submessage are preserved. 
-func TestSubmessageUnrecognizedFields(t *testing.T) { - nm := &NewMessage{ - Nested: &NewMessage_Nested{ - Name: String("Nigel"), - FoodGroup: String("carbs"), - }, - } - b, err := Marshal(nm) - if err != nil { - t.Fatalf("Marshal of NewMessage: %v", err) - } - - // Unmarshal into an OldMessage. - om := new(OldMessage) - if err := Unmarshal(b, om); err != nil { - t.Fatalf("Unmarshal to OldMessage: %v", err) - } - exp := &OldMessage{ - Nested: &OldMessage_Nested{ - Name: String("Nigel"), - // normal protocol buffer users should not do this - XXX_unrecognized: []byte("\x12\x05carbs"), - }, - } - if !Equal(om, exp) { - t.Errorf("om = %v, want %v", om, exp) - } - - // Clone the OldMessage. - om = Clone(om).(*OldMessage) - if !Equal(om, exp) { - t.Errorf("Clone(om) = %v, want %v", om, exp) - } - - // Marshal the OldMessage, then unmarshal it into an empty NewMessage. - if b, err = Marshal(om); err != nil { - t.Fatalf("Marshal of OldMessage: %v", err) - } - t.Logf("Marshal(%v) -> %q", om, b) - nm2 := new(NewMessage) - if err := Unmarshal(b, nm2); err != nil { - t.Fatalf("Unmarshal to NewMessage: %v", err) - } - if !Equal(nm, nm2) { - t.Errorf("NewMessage round-trip: %v => %v", nm, nm2) - } -} - -// Check that an int32 field can be upgraded to an int64 field. -func TestNegativeInt32(t *testing.T) { - om := &OldMessage{ - Num: Int32(-1), - } - b, err := Marshal(om) - if err != nil { - t.Fatalf("Marshal of OldMessage: %v", err) - } - - // Check the size. It should be 11 bytes; - // 1 for the field/wire type, and 10 for the negative number. - if len(b) != 11 { - t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b) - } - - // Unmarshal into a NewMessage. - nm := new(NewMessage) - if err := Unmarshal(b, nm); err != nil { - t.Fatalf("Unmarshal to NewMessage: %v", err) - } - want := &NewMessage{ - Num: Int64(-1), - } - if !Equal(nm, want) { - t.Errorf("nm = %v, want %v", nm, want) - } -} - -// Check that we can grow an array (repeated field) to have many elements. -// This test doesn't depend only on our encoding; for variety, it makes sure -// we create, encode, and decode the correct contents explicitly. It's therefore -// a bit messier. -// This test also uses (and hence tests) the Marshal/Unmarshal functions -// instead of the methods. -func TestBigRepeated(t *testing.T) { - pb := initGoTest(true) - - // Create the arrays - const N = 50 // Internally the library starts much smaller. - pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N) - pb.F_Sint64Repeated = make([]int64, N) - pb.F_Sint32Repeated = make([]int32, N) - pb.F_BytesRepeated = make([][]byte, N) - pb.F_StringRepeated = make([]string, N) - pb.F_DoubleRepeated = make([]float64, N) - pb.F_FloatRepeated = make([]float32, N) - pb.F_Uint64Repeated = make([]uint64, N) - pb.F_Uint32Repeated = make([]uint32, N) - pb.F_Fixed64Repeated = make([]uint64, N) - pb.F_Fixed32Repeated = make([]uint32, N) - pb.F_Int64Repeated = make([]int64, N) - pb.F_Int32Repeated = make([]int32, N) - pb.F_BoolRepeated = make([]bool, N) - pb.RepeatedField = make([]*GoTestField, N) - - // Fill in the arrays with checkable values. 
- igtf := initGoTestField() - igtrg := initGoTest_RepeatedGroup() - for i := 0; i < N; i++ { - pb.Repeatedgroup[i] = igtrg - pb.F_Sint64Repeated[i] = int64(i) - pb.F_Sint32Repeated[i] = int32(i) - s := fmt.Sprint(i) - pb.F_BytesRepeated[i] = []byte(s) - pb.F_StringRepeated[i] = s - pb.F_DoubleRepeated[i] = float64(i) - pb.F_FloatRepeated[i] = float32(i) - pb.F_Uint64Repeated[i] = uint64(i) - pb.F_Uint32Repeated[i] = uint32(i) - pb.F_Fixed64Repeated[i] = uint64(i) - pb.F_Fixed32Repeated[i] = uint32(i) - pb.F_Int64Repeated[i] = int64(i) - pb.F_Int32Repeated[i] = int32(i) - pb.F_BoolRepeated[i] = i%2 == 0 - pb.RepeatedField[i] = igtf - } - - // Marshal. - buf, _ := Marshal(pb) - - // Now test Unmarshal by recreating the original buffer. - pbd := new(GoTest) - Unmarshal(buf, pbd) - - // Check the checkable values - for i := uint64(0); i < N; i++ { - if pbd.Repeatedgroup[i] == nil { // TODO: more checking? - t.Error("pbd.Repeatedgroup bad") - } - var x uint64 - x = uint64(pbd.F_Sint64Repeated[i]) - if x != i { - t.Error("pbd.F_Sint64Repeated bad", x, i) - } - x = uint64(pbd.F_Sint32Repeated[i]) - if x != i { - t.Error("pbd.F_Sint32Repeated bad", x, i) - } - s := fmt.Sprint(i) - equalbytes(pbd.F_BytesRepeated[i], []byte(s), t) - if pbd.F_StringRepeated[i] != s { - t.Error("pbd.F_Sint32Repeated bad", pbd.F_StringRepeated[i], i) - } - x = uint64(pbd.F_DoubleRepeated[i]) - if x != i { - t.Error("pbd.F_DoubleRepeated bad", x, i) - } - x = uint64(pbd.F_FloatRepeated[i]) - if x != i { - t.Error("pbd.F_FloatRepeated bad", x, i) - } - x = pbd.F_Uint64Repeated[i] - if x != i { - t.Error("pbd.F_Uint64Repeated bad", x, i) - } - x = uint64(pbd.F_Uint32Repeated[i]) - if x != i { - t.Error("pbd.F_Uint32Repeated bad", x, i) - } - x = pbd.F_Fixed64Repeated[i] - if x != i { - t.Error("pbd.F_Fixed64Repeated bad", x, i) - } - x = uint64(pbd.F_Fixed32Repeated[i]) - if x != i { - t.Error("pbd.F_Fixed32Repeated bad", x, i) - } - x = uint64(pbd.F_Int64Repeated[i]) - if x != i { - t.Error("pbd.F_Int64Repeated bad", x, i) - } - x = uint64(pbd.F_Int32Repeated[i]) - if x != i { - t.Error("pbd.F_Int32Repeated bad", x, i) - } - if pbd.F_BoolRepeated[i] != (i%2 == 0) { - t.Error("pbd.F_BoolRepeated bad", x, i) - } - if pbd.RepeatedField[i] == nil { // TODO: more checking? - t.Error("pbd.RepeatedField bad") - } - } -} - -// Verify we give a useful message when decoding to the wrong structure type. -func TestTypeMismatch(t *testing.T) { - pb1 := initGoTest(true) - - // Marshal - o := old() - o.Marshal(pb1) - - // Now Unmarshal it to the wrong type. 
- pb2 := initGoTestField() - err := o.Unmarshal(pb2) - if err == nil { - t.Error("expected error, got no error") - } else if !strings.Contains(err.Error(), "bad wiretype") { - t.Error("expected bad wiretype error, got", err) - } -} - -func encodeDecode(t *testing.T, in, out Message, msg string) { - buf, err := Marshal(in) - if err != nil { - t.Fatalf("failed marshaling %v: %v", msg, err) - } - if err := Unmarshal(buf, out); err != nil { - t.Fatalf("failed unmarshaling %v: %v", msg, err) - } -} - -func TestPackedNonPackedDecoderSwitching(t *testing.T) { - np, p := new(NonPackedTest), new(PackedTest) - - // non-packed -> packed - np.A = []int32{0, 1, 1, 2, 3, 5} - encodeDecode(t, np, p, "non-packed -> packed") - if !reflect.DeepEqual(np.A, p.B) { - t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B) - } - - // packed -> non-packed - np.Reset() - p.B = []int32{3, 1, 4, 1, 5, 9} - encodeDecode(t, p, np, "packed -> non-packed") - if !reflect.DeepEqual(p.B, np.A) { - t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A) - } -} - -func TestProto1RepeatedGroup(t *testing.T) { - pb := &MessageList{ - Message: []*MessageList_Message{ - { - Name: String("blah"), - Count: Int32(7), - }, - // NOTE: pb.Message[1] is a nil - nil, - }, - } - - o := old() - err := o.Marshal(pb) - if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") { - t.Fatalf("unexpected or no error when marshaling: %v", err) - } -} - -// Test that enums work. Checks for a bug introduced by making enums -// named types instead of int32: newInt32FromUint64 would crash with -// a type mismatch in reflect.PointTo. -func TestEnum(t *testing.T) { - pb := new(GoEnum) - pb.Foo = FOO_FOO1.Enum() - o := old() - if err := o.Marshal(pb); err != nil { - t.Fatal("error encoding enum:", err) - } - pb1 := new(GoEnum) - if err := o.Unmarshal(pb1); err != nil { - t.Fatal("error decoding enum:", err) - } - if *pb1.Foo != FOO_FOO1 { - t.Error("expected 7 but got ", *pb1.Foo) - } -} - -// Enum types have String methods. Check that enum fields can be printed. -// We don't care what the value actually is, just as long as it doesn't crash. -func TestPrintingNilEnumFields(t *testing.T) { - pb := new(GoEnum) - _ = fmt.Sprintf("%+v", pb) -} - -// Verify that absent required fields cause Marshal/Unmarshal to return errors. -func TestRequiredFieldEnforcement(t *testing.T) { - pb := new(GoTestField) - _, err := Marshal(pb) - if err == nil { - t.Error("marshal: expected error, got nil") - } else if strings.Index(err.Error(), "Label") < 0 { - t.Errorf("marshal: bad error type: %v", err) - } - - // A slightly sneaky, yet valid, proto. It encodes the same required field twice, - // so simply counting the required fields is insufficient. - // field 1, encoding 2, value "hi" - buf := []byte("\x0A\x02hi\x0A\x02hi") - err = Unmarshal(buf, pb) - if err == nil { - t.Error("unmarshal: expected error, got nil") - } else if strings.Index(err.Error(), "{Unknown}") < 0 { - t.Errorf("unmarshal: bad error type: %v", err) - } -} - -func TestTypedNilMarshal(t *testing.T) { - // A typed nil should return ErrNil and not crash. - _, err := Marshal((*GoEnum)(nil)) - if err != ErrNil { - t.Errorf("Marshal: got err %v, want ErrNil", err) - } -} - -// A type that implements the Marshaler interface, but is not nillable. 
-type nonNillableInt uint64 - -func (nni nonNillableInt) Marshal() ([]byte, error) { - return EncodeVarint(uint64(nni)), nil -} - -type NNIMessage struct { - nni nonNillableInt -} - -func (*NNIMessage) Reset() {} -func (*NNIMessage) String() string { return "" } -func (*NNIMessage) ProtoMessage() {} - -// A type that implements the Marshaler interface and is nillable. -type nillableMessage struct { - x uint64 -} - -func (nm *nillableMessage) Marshal() ([]byte, error) { - return EncodeVarint(nm.x), nil -} - -type NMMessage struct { - nm *nillableMessage -} - -func (*NMMessage) Reset() {} -func (*NMMessage) String() string { return "" } -func (*NMMessage) ProtoMessage() {} - -// Verify a type that uses the Marshaler interface, but has a nil pointer. -func TestNilMarshaler(t *testing.T) { - // Try a struct with a Marshaler field that is nil. - // It should be directly marshable. - nmm := new(NMMessage) - if _, err := Marshal(nmm); err != nil { - t.Error("unexpected error marshaling nmm: ", err) - } - - // Try a struct with a Marshaler field that is not nillable. - nnim := new(NNIMessage) - nnim.nni = 7 - var _ Marshaler = nnim.nni // verify it is truly a Marshaler - if _, err := Marshal(nnim); err != nil { - t.Error("unexpected error marshaling nnim: ", err) - } -} - -func TestAllSetDefaults(t *testing.T) { - // Exercise SetDefaults with all scalar field types. - m := &Defaults{ - // NaN != NaN, so override that here. - F_Nan: Float32(1.7), - } - expected := &Defaults{ - F_Bool: Bool(true), - F_Int32: Int32(32), - F_Int64: Int64(64), - F_Fixed32: Uint32(320), - F_Fixed64: Uint64(640), - F_Uint32: Uint32(3200), - F_Uint64: Uint64(6400), - F_Float: Float32(314159), - F_Double: Float64(271828), - F_String: String(`hello, "world!"` + "\n"), - F_Bytes: []byte("Bignose"), - F_Sint32: Int32(-32), - F_Sint64: Int64(-64), - F_Enum: Defaults_GREEN.Enum(), - F_Pinf: Float32(float32(math.Inf(1))), - F_Ninf: Float32(float32(math.Inf(-1))), - F_Nan: Float32(1.7), - StrZero: String(""), - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultsWithSetField(t *testing.T) { - // Check that a set value is not overridden. 
- m := &Defaults{ - F_Int32: Int32(12), - } - SetDefaults(m) - if v := m.GetF_Int32(); v != 12 { - t.Errorf("m.FInt32 = %v, want 12", v) - } -} - -func TestSetDefaultsWithSubMessage(t *testing.T) { - m := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("gopher"), - }, - } - expected := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("gopher"), - Port: Int32(4000), - }, - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) { - m := &MyMessage{ - RepInner: []*InnerMessage{{}}, - } - expected := &MyMessage{ - RepInner: []*InnerMessage{{ - Port: Int32(4000), - }}, - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultWithRepeatedNonMessage(t *testing.T) { - m := &MyMessage{ - Pet: []string{"turtle", "wombat"}, - } - expected := Clone(m) - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestMaximumTagNumber(t *testing.T) { - m := &MaxTag{ - LastField: String("natural goat essence"), - } - buf, err := Marshal(m) - if err != nil { - t.Fatalf("proto.Marshal failed: %v", err) - } - m2 := new(MaxTag) - if err := Unmarshal(buf, m2); err != nil { - t.Fatalf("proto.Unmarshal failed: %v", err) - } - if got, want := m2.GetLastField(), *m.LastField; got != want { - t.Errorf("got %q, want %q", got, want) - } -} - -func TestJSON(t *testing.T) { - m := &MyMessage{ - Count: Int32(4), - Pet: []string{"bunny", "kitty"}, - Inner: &InnerMessage{ - Host: String("cauchy"), - }, - Bikeshed: MyMessage_GREEN.Enum(), - } - const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}` - - b, err := json.Marshal(m) - if err != nil { - t.Fatalf("json.Marshal failed: %v", err) - } - s := string(b) - if s != expected { - t.Errorf("got %s\nwant %s", s, expected) - } - - received := new(MyMessage) - if err := json.Unmarshal(b, received); err != nil { - t.Fatalf("json.Unmarshal failed: %v", err) - } - if !Equal(received, m) { - t.Fatalf("got %s, want %s", received, m) - } - - // Test unmarshalling of JSON with symbolic enum name. - const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}` - received.Reset() - if err := json.Unmarshal([]byte(old), received); err != nil { - t.Fatalf("json.Unmarshal failed: %v", err) - } - if !Equal(received, m) { - t.Fatalf("got %s, want %s", received, m) - } -} - -func TestBadWireType(t *testing.T) { - b := []byte{7<<3 | 6} // field 7, wire type 6 - pb := new(OtherMessage) - if err := Unmarshal(b, pb); err == nil { - t.Errorf("Unmarshal did not fail") - } else if !strings.Contains(err.Error(), "unknown wire type") { - t.Errorf("wrong error: %v", err) - } -} - -func TestBytesWithInvalidLength(t *testing.T) { - // If a byte sequence has an invalid (negative) length, Unmarshal should not panic. - b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0} - Unmarshal(b, new(MyMessage)) -} - -func TestLengthOverflow(t *testing.T) { - // Overflowing a length should not panic. - b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01} - Unmarshal(b, new(MyMessage)) -} - -func TestVarintOverflow(t *testing.T) { - // Overflowing a 64-bit length should not be allowed. 
- b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01} - if err := Unmarshal(b, new(MyMessage)); err == nil { - t.Fatalf("Overflowed uint64 length without error") - } -} - -func TestUnmarshalFuzz(t *testing.T) { - const N = 1000 - seed := time.Now().UnixNano() - t.Logf("RNG seed is %d", seed) - rng := rand.New(rand.NewSource(seed)) - buf := make([]byte, 20) - for i := 0; i < N; i++ { - for j := range buf { - buf[j] = byte(rng.Intn(256)) - } - fuzzUnmarshal(t, buf) - } -} - -func TestMergeMessages(t *testing.T) { - pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}} - data, err := Marshal(pb) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - pb1 := new(MessageList) - if err := Unmarshal(data, pb1); err != nil { - t.Fatalf("first Unmarshal: %v", err) - } - if err := Unmarshal(data, pb1); err != nil { - t.Fatalf("second Unmarshal: %v", err) - } - if len(pb1.Message) != 1 { - t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message)) - } - - pb2 := new(MessageList) - if err := UnmarshalMerge(data, pb2); err != nil { - t.Fatalf("first UnmarshalMerge: %v", err) - } - if err := UnmarshalMerge(data, pb2); err != nil { - t.Fatalf("second UnmarshalMerge: %v", err) - } - if len(pb2.Message) != 2 { - t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message)) - } -} - -func TestExtensionMarshalOrder(t *testing.T) { - m := &MyMessage{Count: Int(123)} - if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil { - t.Fatalf("SetExtension: %v", err) - } - if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil { - t.Fatalf("SetExtension: %v", err) - } - if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil { - t.Fatalf("SetExtension: %v", err) - } - - // Serialize m several times, and check we get the same bytes each time. - var orig []byte - for i := 0; i < 100; i++ { - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if i == 0 { - orig = b - continue - } - if !bytes.Equal(b, orig) { - t.Errorf("Bytes differ on attempt #%d", i) - } - } -} - -// Many extensions, because small maps might not iterate differently on each iteration. -var exts = []*ExtensionDesc{ - E_X201, - E_X202, - E_X203, - E_X204, - E_X205, - E_X206, - E_X207, - E_X208, - E_X209, - E_X210, - E_X211, - E_X212, - E_X213, - E_X214, - E_X215, - E_X216, - E_X217, - E_X218, - E_X219, - E_X220, - E_X221, - E_X222, - E_X223, - E_X224, - E_X225, - E_X226, - E_X227, - E_X228, - E_X229, - E_X230, - E_X231, - E_X232, - E_X233, - E_X234, - E_X235, - E_X236, - E_X237, - E_X238, - E_X239, - E_X240, - E_X241, - E_X242, - E_X243, - E_X244, - E_X245, - E_X246, - E_X247, - E_X248, - E_X249, - E_X250, -} - -func TestMessageSetMarshalOrder(t *testing.T) { - m := &MyMessageSet{} - for _, x := range exts { - if err := SetExtension(m, x, &Empty{}); err != nil { - t.Fatalf("SetExtension: %v", err) - } - } - - buf, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - // Serialize m several times, and check we get the same bytes each time. 
- for i := 0; i < 10; i++ { - b1, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if !bytes.Equal(b1, buf) { - t.Errorf("Bytes differ on re-Marshal #%d", i) - } - - m2 := &MyMessageSet{} - if err := Unmarshal(buf, m2); err != nil { - t.Errorf("Unmarshal: %v", err) - } - b2, err := Marshal(m2) - if err != nil { - t.Errorf("re-Marshal: %v", err) - } - if !bytes.Equal(b2, buf) { - t.Errorf("Bytes differ on round-trip #%d", i) - } - } -} - -func TestUnmarshalMergesMessages(t *testing.T) { - // If a nested message occurs twice in the input, - // the fields should be merged when decoding. - a := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("polhode"), - Port: Int32(1234), - }, - } - aData, err := Marshal(a) - if err != nil { - t.Fatalf("Marshal(a): %v", err) - } - b := &OtherMessage{ - Weight: Float32(1.2), - Inner: &InnerMessage{ - Host: String("herpolhode"), - Connected: Bool(true), - }, - } - bData, err := Marshal(b) - if err != nil { - t.Fatalf("Marshal(b): %v", err) - } - want := &OtherMessage{ - Key: Int64(123), - Weight: Float32(1.2), - Inner: &InnerMessage{ - Host: String("herpolhode"), - Port: Int32(1234), - Connected: Bool(true), - }, - } - got := new(OtherMessage) - if err := Unmarshal(append(aData, bData...), got); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if !Equal(got, want) { - t.Errorf("\n got %v\nwant %v", got, want) - } -} - -func TestEncodingSizes(t *testing.T) { - tests := []struct { - m Message - n int - }{ - {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6}, - {&Defaults{F_Int32: Int32(math.MinInt32)}, 11}, - {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6}, - {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6}, - } - for _, test := range tests { - b, err := Marshal(test.m) - if err != nil { - t.Errorf("Marshal(%v): %v", test.m, err) - continue - } - if len(b) != test.n { - t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n) - } - } -} - -func TestRequiredNotSetError(t *testing.T) { - pb := initGoTest(false) - pb.RequiredField.Label = nil - pb.F_Int32Required = nil - pb.F_Int64Required = nil - - expected := "0807" + // field 1, encoding 0, value 7 - "2206" + "120474797065" + // field 4, encoding 2 (GoTestField) - "5001" + // field 10, encoding 0, value 1 - "6d20000000" + // field 13, encoding 5, value 0x20 - "714000000000000000" + // field 14, encoding 1, value 0x40 - "78a019" + // field 15, encoding 0, value 0xca0 = 3232 - "8001c032" + // field 16, encoding 0, value 0x1940 = 6464 - "8d0100004a45" + // field 17, encoding 5, value 3232.0 - "9101000000000040b940" + // field 18, encoding 1, value 6464.0 - "9a0106" + "737472696e67" + // field 19, encoding 2, string "string" - "b304" + // field 70, encoding 3, start group - "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required" - "b404" + // field 70, encoding 4, end group - "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes" - "b0063f" + // field 102, encoding 0, 0x3f zigzag32 - "b8067f" // field 103, encoding 0, 0x7f zigzag64 - - o := old() - bytes, err := Marshal(pb) - if _, ok := err.(*RequiredNotSetError); !ok { - fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) - t.Fatalf("expected = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.Label") < 0 { - t.Errorf("marshal-1 wrong err msg: %v", err) - } - if !equal(bytes, expected, t) { - o.DebugPrint("neq 1", bytes) - t.Fatalf("expected = %s", expected) - } - - // Now test Unmarshal by 
recreating the original buffer. - pbd := new(GoTest) - err = Unmarshal(bytes, pbd) - if _, ok := err.(*RequiredNotSetError); !ok { - t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) - t.Fatalf("string = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 { - t.Errorf("unmarshal wrong err msg: %v", err) - } - bytes, err = Marshal(pbd) - if _, ok := err.(*RequiredNotSetError); !ok { - t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) - t.Fatalf("string = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.Label") < 0 { - t.Errorf("marshal-2 wrong err msg: %v", err) - } - if !equal(bytes, expected, t) { - o.DebugPrint("neq 2", bytes) - t.Fatalf("string = %s", expected) - } -} - -func fuzzUnmarshal(t *testing.T, data []byte) { - defer func() { - if e := recover(); e != nil { - t.Errorf("These bytes caused a panic: %+v", data) - t.Logf("Stack:\n%s", debug.Stack()) - t.FailNow() - } - }() - - pb := new(MyMessage) - Unmarshal(data, pb) -} - -func TestMapFieldMarshal(t *testing.T) { - m := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Rob", - 4: "Ian", - 8: "Dave", - }, - } - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - // b should be the concatenation of these three byte sequences in some order. - parts := []string{ - "\n\a\b\x01\x12\x03Rob", - "\n\a\b\x04\x12\x03Ian", - "\n\b\b\x08\x12\x04Dave", - } - ok := false - for i := range parts { - for j := range parts { - if j == i { - continue - } - for k := range parts { - if k == i || k == j { - continue - } - try := parts[i] + parts[j] + parts[k] - if bytes.Equal(b, []byte(try)) { - ok = true - break - } - } - } - } - if !ok { - t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2]) - } - t.Logf("FYI b: %q", b) - - (new(Buffer)).DebugPrint("Dump of b", b) -} - -func TestMapFieldRoundTrips(t *testing.T) { - m := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Rob", - 4: "Ian", - 8: "Dave", - }, - MsgMapping: map[int64]*FloatingPoint{ - 0x7001: {F: Float64(2.0)}, - }, - ByteMapping: map[bool][]byte{ - false: []byte("that's not right!"), - true: []byte("aye, 'tis true!"), - }, - } - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - t.Logf("FYI b: %q", b) - m2 := new(MessageWithMap) - if err := Unmarshal(b, m2); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - for _, pair := range [][2]interface{}{ - {m.NameMapping, m2.NameMapping}, - {m.MsgMapping, m2.MsgMapping}, - {m.ByteMapping, m2.ByteMapping}, - } { - if !reflect.DeepEqual(pair[0], pair[1]) { - t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1]) - } - } -} - -func TestMapFieldWithNil(t *testing.T) { - m := &MessageWithMap{ - MsgMapping: map[int64]*FloatingPoint{ - 1: nil, - }, - } - b, err := Marshal(m) - if err == nil { - t.Fatalf("Marshal of bad map should have failed, got these bytes: %v", b) - } -} - -// Benchmarks - -func testMsg() *GoTest { - pb := initGoTest(true) - const N = 1000 // Internally the library starts much smaller. 
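The `parts` strings in TestMapFieldMarshal above are the wire encodings of single map entries: each entry is a length-delimited message whose key is field 1 and whose value is field 2, and the map field itself (NameMapping) is field 1 of the outer message. A standalone sketch reproducing the first part, "\n\a\b\x01\x12\x03Rob", by hand:

```go
package main

import (
	"bytes"
	"fmt"
)

// Hand-encode one entry of map[int32]string{1: "Rob"}.
func main() {
	var entry bytes.Buffer
	entry.WriteByte(1<<3 | 0) // entry key: field 1, wire type 0 (varint)
	entry.WriteByte(1)        // key value 1
	entry.WriteByte(2<<3 | 2) // entry value: field 2, wire type 2 (bytes)
	entry.WriteByte(3)        // value length
	entry.WriteString("Rob")

	var field bytes.Buffer
	field.WriteByte(1<<3 | 2) // NameMapping: field 1, wire type 2
	field.WriteByte(byte(entry.Len()))
	field.Write(entry.Bytes())

	fmt.Printf("%q\n", field.Bytes()) // "\n\a\b\x01\x12\x03Rob"
}
```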
- pb.F_Int32Repeated = make([]int32, N) - pb.F_DoubleRepeated = make([]float64, N) - for i := 0; i < N; i++ { - pb.F_Int32Repeated[i] = int32(i) - pb.F_DoubleRepeated[i] = float64(i) - } - return pb -} - -func bytesMsg() *GoTest { - pb := initGoTest(true) - buf := make([]byte, 4000) - for i := range buf { - buf[i] = byte(i) - } - pb.F_BytesDefaulted = buf - return pb -} - -func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) { - d, _ := marshal(pb) - b.SetBytes(int64(len(d))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - marshal(pb) - } -} - -func benchmarkBufferMarshal(b *testing.B, pb Message) { - p := NewBuffer(nil) - benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { - p.Reset() - err := p.Marshal(pb0) - return p.Bytes(), err - }) -} - -func benchmarkSize(b *testing.B, pb Message) { - benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { - Size(pb) - return nil, nil - }) -} - -func newOf(pb Message) Message { - in := reflect.ValueOf(pb) - if in.IsNil() { - return pb - } - return reflect.New(in.Type().Elem()).Interface().(Message) -} - -func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) { - d, _ := Marshal(pb) - b.SetBytes(int64(len(d))) - pbd := newOf(pb) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - unmarshal(d, pbd) - } -} - -func benchmarkBufferUnmarshal(b *testing.B, pb Message) { - p := NewBuffer(nil) - benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error { - p.SetBuf(d) - return p.Unmarshal(pb0) - }) -} - -// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes} - -func BenchmarkMarshal(b *testing.B) { - benchmarkMarshal(b, testMsg(), Marshal) -} - -func BenchmarkBufferMarshal(b *testing.B) { - benchmarkBufferMarshal(b, testMsg()) -} - -func BenchmarkSize(b *testing.B) { - benchmarkSize(b, testMsg()) -} - -func BenchmarkUnmarshal(b *testing.B) { - benchmarkUnmarshal(b, testMsg(), Unmarshal) -} - -func BenchmarkBufferUnmarshal(b *testing.B) { - benchmarkBufferUnmarshal(b, testMsg()) -} - -func BenchmarkMarshalBytes(b *testing.B) { - benchmarkMarshal(b, bytesMsg(), Marshal) -} - -func BenchmarkBufferMarshalBytes(b *testing.B) { - benchmarkBufferMarshal(b, bytesMsg()) -} - -func BenchmarkSizeBytes(b *testing.B) { - benchmarkSize(b, bytesMsg()) -} - -func BenchmarkUnmarshalBytes(b *testing.B) { - benchmarkUnmarshal(b, bytesMsg(), Unmarshal) -} - -func BenchmarkBufferUnmarshalBytes(b *testing.B) { - benchmarkBufferUnmarshal(b, bytesMsg()) -} - -func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) { - b.StopTimer() - pb := initGoTestField() - skip := &GoSkipTest{ - SkipInt32: Int32(32), - SkipFixed32: Uint32(3232), - SkipFixed64: Uint64(6464), - SkipString: String("skipper"), - Skipgroup: &GoSkipTest_SkipGroup{ - GroupInt32: Int32(75), - GroupString: String("wxyz"), - }, - } - - pbd := new(GoTestField) - p := NewBuffer(nil) - p.Marshal(pb) - p.Marshal(skip) - p2 := NewBuffer(nil) - - b.StartTimer() - for i := 0; i < b.N; i++ { - p2.SetBuf(p.Bytes()) - p2.Unmarshal(pbd) - } -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/clone.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/clone.go deleted file mode 100644 index 57297947b..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/clone.go +++ /dev/null @@ -1,217 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: MessageSet and RawMessage. - -package proto - -import ( - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(pb Message) Message { - in := reflect.ValueOf(pb) - if in.IsNil() { - return pb - } - - out := reflect.New(in.Type().Elem()) - // out is empty so a merge is a deep copy. - mergeStruct(out.Elem(), in.Elem()) - return out.Interface().(Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") - } - if in.IsNil() { - // Merging nil into non-nil is a quiet no-op - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := in.Addr().Interface().(extensionsMap); ok { - emOut := out.Addr().Interface().(extensionsMap) - mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap()) - } else if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { - emOut := out.Addr().Interface().(extensionsBytes) - bIn := emIn.GetExtensions() - bOut := emOut.GetExtensions() - *bOut = append(*bOut, *bIn...) 
- } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. 
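The Merge/mergeAny logic above implements, generically via reflection, rules that are easy to state concretely: set scalar fields in src overwrite dst (with freshly allocated pointers), repeated fields append, map entries overlay per key, and bytes are deep-copied. A hand-written sketch of the same rules for hypothetical concrete types:

```go
package main

import "fmt"

// Inner mirrors a proto2-style message: pointer fields signal "set".
type Inner struct {
	Host *string
	Port *int32
}

// Msg is a hypothetical message with one field of each merge behavior.
type Msg struct {
	Inner *Inner
	Pet   []string         // repeated field: merging appends
	Names map[int32]string // map field: merging overlays per key
}

func mergeMsg(dst, src *Msg) {
	if src.Inner != nil {
		if dst.Inner == nil {
			dst.Inner = new(Inner)
		}
		if src.Inner.Host != nil {
			h := *src.Inner.Host // fresh pointer, as mergeAny allocates
			dst.Inner.Host = &h
		}
		if src.Inner.Port != nil {
			p := *src.Inner.Port
			dst.Inner.Port = &p
		}
	}
	dst.Pet = append(dst.Pet, src.Pet...)
	if len(src.Names) > 0 {
		if dst.Names == nil {
			dst.Names = make(map[int32]string, len(src.Names))
		}
		for k, v := range src.Names {
			dst.Names[k] = v
		}
	}
}

func main() {
	host, port := "hey", int32(9099)
	dst := &Msg{Inner: &Inner{Port: &port}, Pet: []string{"bunny"}, Names: map[int32]string{6: "Bruce"}}
	src := &Msg{Inner: &Inner{Host: &host}, Pet: []string{"horsey"}, Names: map[int32]string{6: "Nigel"}}
	mergeMsg(dst, src)
	fmt.Println(*dst.Inner.Host, *dst.Inner.Port, dst.Pet, dst.Names)
	// hey 9099 [bunny horsey] map[6:Nigel]
}
```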
- out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/clone_test.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/clone_test.go deleted file mode 100644 index 7eef89ee0..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/clone_test.go +++ /dev/null @@ -1,245 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto_test - -import ( - "testing" - - "github.com/gogo/protobuf/proto" - - proto3pb "github.com/gogo/protobuf/proto/proto3_proto" - pb "github.com/gogo/protobuf/proto/testdata" -) - -var cloneTestMessage = &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &pb.InnerMessage{ - Host: proto.String("niles"), - Port: proto.Int32(9099), - Connected: proto.Bool(true), - }, - Others: []*pb.OtherMessage{ - { - Value: []byte("some bytes"), - }, - }, - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, -} - -func init() { - ext := &pb.Ext{ - Data: proto.String("extension"), - } - if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil { - panic("SetExtension: " + err.Error()) - } -} - -func TestClone(t *testing.T) { - m := proto.Clone(cloneTestMessage).(*pb.MyMessage) - if !proto.Equal(m, cloneTestMessage) { - t.Errorf("Clone(%v) = %v", cloneTestMessage, m) - } - - // Verify it was a deep copy. - *m.Inner.Port++ - if proto.Equal(m, cloneTestMessage) { - t.Error("Mutating clone changed the original") - } - // Byte fields and repeated fields should be copied. - if &m.Pet[0] == &cloneTestMessage.Pet[0] { - t.Error("Pet: repeated field not copied") - } - if &m.Others[0] == &cloneTestMessage.Others[0] { - t.Error("Others: repeated field not copied") - } - if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] { - t.Error("Others[0].Value: bytes field not copied") - } - if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] { - t.Error("RepBytes: repeated field not copied") - } - if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] { - t.Error("RepBytes[0]: bytes field not copied") - } -} - -func TestCloneNil(t *testing.T) { - var m *pb.MyMessage - if c := proto.Clone(m); !proto.Equal(m, c) { - t.Errorf("Clone(%v) = %v", m, c) - } -} - -var mergeTests = []struct { - src, dst, want proto.Message -}{ - { - src: &pb.MyMessage{ - Count: proto.Int32(42), - }, - dst: &pb.MyMessage{ - Name: proto.String("Dave"), - }, - want: &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - }, - }, - { - src: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("hey"), - Connected: proto.Bool(true), - }, - Pet: []string{"horsey"}, - Others: []*pb.OtherMessage{ - { - Value: []byte("some bytes"), - }, - }, - }, - dst: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("niles"), - Port: proto.Int32(9099), - }, - Pet: []string{"bunny", "kitty"}, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(31415926535), - }, - { - // Explicitly test a src=nil field - Inner: nil, - }, - }, - }, - want: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("hey"), - Connected: proto.Bool(true), - Port: proto.Int32(9099), - }, - Pet: []string{"bunny", "kitty", "horsey"}, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(31415926535), - }, - {}, - { - Value: []byte("some bytes"), - }, - }, - }, - }, - { - src: &pb.MyMessage{ - RepBytes: [][]byte{[]byte("wow")}, - }, - dst: &pb.MyMessage{ - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham")}, - }, - want: &pb.MyMessage{ - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, - }, - }, - // Check that a scalar bytes field replaces rather than appends. 
- { - src: &pb.OtherMessage{Value: []byte("foo")}, - dst: &pb.OtherMessage{Value: []byte("bar")}, - want: &pb.OtherMessage{Value: []byte("foo")}, - }, - { - src: &pb.MessageWithMap{ - NameMapping: map[int32]string{6: "Nigel"}, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4001: {F: proto.Float64(2.0)}, - }, - ByteMapping: map[bool][]byte{true: []byte("wowsa")}, - }, - dst: &pb.MessageWithMap{ - NameMapping: map[int32]string{ - 6: "Bruce", // should be overwritten - 7: "Andrew", - }, - }, - want: &pb.MessageWithMap{ - NameMapping: map[int32]string{ - 6: "Nigel", - 7: "Andrew", - }, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4001: {F: proto.Float64(2.0)}, - }, - ByteMapping: map[bool][]byte{true: []byte("wowsa")}, - }, - }, - // proto3 shouldn't merge zero values, - // in the same way that proto2 shouldn't merge nils. - { - src: &proto3pb.Message{ - Name: "Aaron", - Data: []byte(""), // zero value, but not nil - }, - dst: &proto3pb.Message{ - HeightInCm: 176, - Data: []byte("texas!"), - }, - want: &proto3pb.Message{ - Name: "Aaron", - HeightInCm: 176, - Data: []byte("texas!"), - }, - }, -} - -func TestMerge(t *testing.T) { - for _, m := range mergeTests { - got := proto.Clone(m.dst) - proto.Merge(got, m.src) - if !proto.Equal(got, m.want) { - t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want) - } - } -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/decode.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/decode.go deleted file mode 100644 index f7b1884b3..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/decode.go +++ /dev/null @@ -1,832 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. 
- */ - -import ( - "errors" - "fmt" - "io" - "os" - "reflect" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - // x, n already 0 - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - // x, err already 0 - - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. 
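The loop in DecodeVarint above accumulates 7 payload bits per byte, least-significant group first, with the high bit marking continuation. A standalone worked example, independent of the vendored package:

```go
package main

import "fmt"

// decodeVarint mirrors DecodeVarint above: 7 payload bits per byte,
// least-significant group first, high bit set on every byte but the last.
func decodeVarint(buf []byte) (x uint64, n int) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(buf) {
			return 0, 0 // truncated input
		}
		b := uint64(buf[n])
		n++
		x |= (b & 0x7F) << shift
		if b&0x80 == 0 {
			return x, n
		}
	}
	return 0, 0 // would overflow 64 bits
}

func main() {
	// 300 = 0b10_0101100: low group 0101100 -> 0xAC (continuation bit
	// set), high group 10 -> 0x02.
	x, n := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(x, n) // 300 2
}
```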
-// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - return UnmarshalMerge(buf, pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. 
-func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. -func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - if ee, ok := e.(extensionsMap); ok { - ext := ee.ExtensionMap()[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - ee.ExtensionMap()[int32(tag)] = ext - } else if ee, ok := e.(extensionsBytes); ok { - ext := ee.GetExtensions() - *ext = append(*ext, o.buf[oi:o.index]...) - } - } - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. 
If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. - return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. -const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. -func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. -func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - - y := *v - for i := 0; i < nb; i++ { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). 
-func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. -func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. -func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. - keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. 
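A packed repeated field, as consumed by dec_slice_packed_int32 above, is a single length-delimited payload of back-to-back varints rather than one key per element. A standalone sketch decoding a sample payload holding 3, 270, and 86942:

```go
package main

import "fmt"

// decodeVarint: 7 payload bits per byte, continuation bit 0x80.
func decodeVarint(buf []byte) (x uint64, n int) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(buf) {
			return 0, 0
		}
		b := uint64(buf[n])
		n++
		x |= (b & 0x7F) << shift
		if b&0x80 == 0 {
			return x, n
		}
	}
	return 0, 0
}

// decodePackedInt32 consumes what dec_slice_packed_int32 sees after
// the field key: a varint byte count, then that many bytes of
// back-to-back varint elements.
func decodePackedInt32(buf []byte) ([]int32, error) {
	length, n := decodeVarint(buf)
	if n == 0 || uint64(len(buf)-n) < length {
		return nil, fmt.Errorf("truncated packed field")
	}
	payload := buf[n : n+int(length)]
	var out []int32
	for len(payload) > 0 {
		v, m := decodeVarint(payload)
		if m == 0 {
			return nil, fmt.Errorf("bad element varint")
		}
		out = append(out, int32(v))
		payload = payload[m:]
	}
	return out, nil
}

func main() {
	// 6-byte payload holding the varints for 3, 270, and 86942.
	b := []byte{0x06, 0x03, 0x8E, 0x02, 0x9E, 0xA7, 0x05}
	fmt.Println(decodePackedInt32(b)) // [3 270 86942] <nil>
}
```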
- for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. - tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() || !valelem.IsValid() { - // We did not decode the key or the value in the map entry. - // Either way, it's an invalid map entry. - return fmt.Errorf("proto: bad map data: missing key/val") - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. -func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). -func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - - return err -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/decode_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/decode_gogo.go deleted file mode 100644 index 6a77aad76..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/decode_gogo.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. 
-// http://github.com/gogo/protobuf/gogoproto -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" -) - -// Decode a reference to a struct pointer. -func (o *Buffer) dec_ref_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - panic("not supported, since this is a pointer receiver") - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - bas := structPointer_FieldPointer(base, p.field) - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of references to struct pointers ([]struct). -func (o *Buffer) dec_slice_ref_struct(p *Properties, is_group bool, base structPointer) error { - newBas := appendStructPointer(base, p.field, p.sstype) - - if is_group { - panic("not supported, maybe in future, if requested.") - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - panic("not supported, since this is not a pointer receiver.") - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, newBas) - - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of references to struct pointers. 
-func (o *Buffer) dec_slice_ref_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_ref_struct(p, false, base) -} - -func setPtrCustomType(base structPointer, f field, v interface{}) { - if v == nil { - return - } - structPointer_SetStructPointer(base, f, structPointer(reflect.ValueOf(v).Pointer())) -} - -func setCustomType(base structPointer, f field, value interface{}) { - if value == nil { - return - } - v := reflect.ValueOf(value).Elem() - t := reflect.TypeOf(value).Elem() - kind := t.Kind() - switch kind { - case reflect.Slice: - slice := reflect.MakeSlice(t, v.Len(), v.Cap()) - reflect.Copy(slice, v) - oldHeader := structPointer_GetSliceHeader(base, f) - oldHeader.Data = slice.Pointer() - oldHeader.Len = v.Len() - oldHeader.Cap = v.Cap() - default: - l := 1 - size := reflect.TypeOf(value).Elem().Size() - if kind == reflect.Array { - l = reflect.TypeOf(value).Elem().Len() - size = reflect.TypeOf(value).Size() - } - total := int(size) * l - structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), total) - } -} - -func (o *Buffer) dec_custom_bytes(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - i := reflect.New(p.ctype.Elem()).Interface() - custom := (i).(Unmarshaler) - if err := custom.Unmarshal(b); err != nil { - return err - } - setPtrCustomType(base, p.field, custom) - return nil -} - -func (o *Buffer) dec_custom_ref_bytes(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - i := reflect.New(p.ctype).Interface() - custom := (i).(Unmarshaler) - if err := custom.Unmarshal(b); err != nil { - return err - } - if custom != nil { - setCustomType(base, p.field, custom) - } - return nil -} - -// Decode a slice of bytes ([]byte) into a slice of custom types. -func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - i := reflect.New(p.ctype.Elem()).Interface() - custom := (i).(Unmarshaler) - if err := custom.Unmarshal(b); err != nil { - return err - } - newBas := appendStructPointer(base, p.field, p.ctype) - - setCustomType(newBas, 0, custom) - - return nil -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/encode.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/encode.go deleted file mode 100644 index 91f3f0784..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/encode.go +++ /dev/null @@ -1,1293 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "fmt" - "reflect" - "sort" -) - -// RequiredNotSetError is the error returned if Marshal is called with -// a protocol buffer struct whose required fields have not -// all been initialized. It is also the error returned if Unmarshal is -// called with an encoded protocol buffer that does not include all the -// required fields. -// -// When printed, RequiredNotSetError reports the first unset required field in a -// message. If the field cannot be precisely determined, it is reported as -// "{Unknown}". -type RequiredNotSetError struct { - field string -} - -func (e *RequiredNotSetError) Error() string { - return fmt.Sprintf("proto: required field %q not set", e.field) -} - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -func sizeFixed64(x uint64) int { - return 8 -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. 
-// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -func sizeFixed32(x uint64) int { - return 4 -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - var state errorState - if err != nil && !state.shouldContinue(err, nil) { - return nil, err - } - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - if err != nil { - return err - } - p.buf = append(p.buf, data...) - return nil - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - stats.Encode++ - } - - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. 
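EncodeZigzag64 and EncodeZigzag32 above use the (x << 1) ^ (x >> 63) mapping so that values of small magnitude, negative or positive, encode to small varints. A standalone round-trip check:

```go
package main

import "fmt"

// zigzag64 is the mapping used by EncodeZigzag64: the arithmetic
// right shift smears the sign bit across all 64 bits, so
// 0,-1,1,-2,2,... map to 0,1,2,3,4,...
func zigzag64(v int64) uint64 {
	return uint64(v<<1) ^ uint64(v>>63)
}

// unzigzag64 inverts the mapping, matching DecodeZigzag64.
func unzigzag64(u uint64) int64 {
	return int64(u>>1) ^ -int64(u&1)
}

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2, -64, 63} {
		u := zigzag64(v)
		fmt.Printf("%4d -> %3d -> %4d\n", v, u, unzigzag64(u))
	}
}
```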
- if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - stats.Size++ - } - - return -} - -// Individual type encoders. - -// Encode a bool. -func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. 
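The enc_proto3_* encoders above return ErrNil for zero values, meaning the field is omitted from the output entirely; this is why proto3 cannot distinguish an unset scalar from its zero value. A standalone sketch of the same behavior for a hypothetical int32 field:

```go
package main

import "fmt"

// encodeProto3Int32 mirrors enc_proto3_int32: a zero value produces
// no bytes at all (the ErrNil return above means "skip the field").
// Field numbers are assumed small enough for a single-byte key.
func encodeProto3Int32(fieldNum int, v int32) []byte {
	if v == 0 {
		return nil // unset and zero are indistinguishable on the wire
	}
	out := []byte{byte(fieldNum<<3 | 0)} // key: field number, wire type 0
	u := uint64(int64(v))                // sign-extend, as enc_int32 notes
	for u >= 0x80 {
		out = append(out, byte(u&0x7F|0x80))
		u >>= 7
	}
	return append(out, byte(u))
}

func main() {
	fmt.Printf("% x\n", encodeProto3Int32(1, 150)) // 08 96 01
	fmt.Println(len(encodeProto3Int32(1, 0)))      // 0
}
```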
-func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. 
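-// (Groups carry no length prefix: the fields are bracketed between a
-// WireStartGroup tag and a WireEndGroup tag, which is why the sizer below
-// adds two tag varints per group rather than a length varint.)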
-func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
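- // (Non-packed repeated fields repeat the key before every element, so n
- // elements cost n copies of tagcode; the packed variant below emits the
- // key once, then a byte count, then the raw values.)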
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
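- // (Same shape as the int32 slice above, minus the widening cast:
- // word64 elements are already uint64 and feed valEnc directly.)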
- p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? 
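- // (As in Size above, the only way to measure a Marshaler is to marshal
- // it; the bytes are discarded and only len(data) is counted.)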
- if p.isMarshaler {
- m := structPointer_Interface(structp, p.stype).(Marshaler)
- data, _ := m.Marshal()
- n += len(p.tagcode)
- n += sizeRawBytes(data)
- continue
- }
-
- n0 := size_struct(p.sprop, structp)
- n1 := sizeVarint(uint64(n0)) // size of encoded length
- n += n0 + n1
- }
- return
-}
-
-// Encode a slice of group structs ([]*struct).
-func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
- var state errorState
- s := structPointer_StructPointerSlice(base, p.field)
- l := s.Len()
-
- for i := 0; i < l; i++ {
- b := s.Index(i)
- if structPointer_IsNil(b) {
- return errRepeatedHasNil
- }
-
- o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
-
- err := o.enc_struct(p.sprop, b)
-
- if err != nil && !state.shouldContinue(err, nil) {
- if err == ErrNil {
- return errRepeatedHasNil
- }
- return err
- }
-
- o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
- }
- return state.err
-}
-
-func size_slice_struct_group(p *Properties, base structPointer) (n int) {
- s := structPointer_StructPointerSlice(base, p.field)
- l := s.Len()
-
- n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
- n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
- for i := 0; i < l; i++ {
- b := s.Index(i)
- if structPointer_IsNil(b) {
- return // return size up to this point
- }
-
- n += size_struct(p.sprop, b)
- }
- return
-}
-
-// Encode an extension map.
-func (o *Buffer) enc_map(p *Properties, base structPointer) error {
- v := *structPointer_ExtMap(base, p.field)
- if err := encodeExtensionMap(v); err != nil {
- return err
- }
- // Fast-path for common cases: zero or one extensions.
- if len(v) <= 1 {
- for _, e := range v {
- o.buf = append(o.buf, e.enc...)
- }
- return nil
- }
-
- // Sort keys to provide a deterministic encoding.
- keys := make([]int, 0, len(v))
- for k := range v {
- keys = append(keys, int(k))
- }
- sort.Ints(keys)
-
- for _, k := range keys {
- o.buf = append(o.buf, v[int32(k)].enc...)
- }
- return nil
-}
-
-func size_map(p *Properties, base structPointer) int {
- v := *structPointer_ExtMap(base, p.field)
- return sizeExtensionMap(v)
-}
-
-// Encode a map field.
-func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
- var state errorState // XXX: or do we need to plumb this through?
-
- /*
- A map defined as
- map<key_type, value_type> map_field = N;
- is encoded in the same way as
- message MapFieldEntry {
- key_type key = 1;
- value_type value = 2;
- }
- repeated MapFieldEntry map_field = N;
- */
-
- v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
- if v.Len() == 0 {
- return nil
- }
-
- keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
-
- enc := func() error {
- if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
- return err
- }
- if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil {
- return err
- }
- return nil
- }
-
- keys := v.MapKeys()
- sort.Sort(mapKeys(keys))
- for _, key := range keys {
- val := v.MapIndex(key)
-
- // The only illegal map entry values are nil message pointers.
- if val.Kind() == reflect.Ptr && val.IsNil() {
- return errors.New("proto: map has nil element")
- }
-
- keycopy.Set(key)
- valcopy.Set(val)
-
- o.buf = append(o.buf, p.tagcode...)
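- // (Each key/value pair goes out as one length-delimited entry, the
- // MapFieldEntry shape sketched in the comment above; enc_len_thing
- // supplies the varint length prefix around the two sub-encodes.)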
- if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. - - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. - // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. 
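- // (Unrecognized fields are stored as raw, already-encoded bytes, so
- // their contribution is simply len(v); no per-field accounting is
- // needed.)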
- if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. - reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/encode_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/encode_gogo.go deleted file mode 100644 index f77cfb1ee..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/encode_gogo.go +++ /dev/null @@ -1,354 +0,0 @@ -// Extensions for Protocol Buffers to create more go like structures. -// -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// http://github.com/golang/protobuf/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" -) - -func NewRequiredNotSetError(field string) *RequiredNotSetError { - return &RequiredNotSetError{field} -} - -type Sizer interface { - Size() int -} - -func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, s...) - return nil -} - -func size_ext_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return 0 - } - n += len(s) - return -} - -// Encode a reference to bool pointer. -func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - x := 0 - if v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_ref_bool(p *Properties, base structPointer) int { - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode a reference to int32 pointer. -func (o *Buffer) enc_ref_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_ref_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func (o *Buffer) enc_ref_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_ref_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a reference to an int64 pointer. -func (o *Buffer) enc_ref_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_ref_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a reference to a string pointer. -func (o *Buffer) enc_ref_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - o.buf = append(o.buf, p.tagcode...) 
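- // (A gogo non-nullable field: the string lives by value in the struct,
- // so there is no nil case and the field is emitted even when empty.)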
- o.EncodeStringBytes(v) - return nil -} - -func size_ref_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - n += len(p.tagcode) - n += sizeStringBytes(v) - return -} - -// Encode a reference to a message struct. -func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetRefStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -//TODO this is only copied, please fix this -func size_ref_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetRefStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a slice of references to message struct pointers ([]struct). -func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error { - var state errorState - ss := structPointer_GetStructPointer(base, p.field) - ss1 := structPointer_GetRefStructPointer(ss, field(0)) - size := p.stype.Size() - l := structPointer_Len(base, p.field) - for i := 0; i < l; i++ { - structp := structPointer_Add(ss1, field(uintptr(i)*size)) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - } - return state.err -} - -//TODO this is only copied, please fix this -func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) { - ss := structPointer_GetStructPointer(base, p.field) - ss1 := structPointer_GetRefStructPointer(ss, field(0)) - size := p.stype.Size() - l := structPointer_Len(base, p.field) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := structPointer_Add(ss1, field(uintptr(i)*size)) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? 
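- // (The elements here are struct values laid out flat in the slice, not
- // pointers, hence the manual base-plus-i*size offset arithmetic above
- // to reach element i.)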
- if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += len(p.tagcode) - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -func (o *Buffer) enc_custom_bytes(p *Properties, base structPointer) error { - i := structPointer_InterfaceRef(base, p.field, p.ctype) - if i == nil { - return ErrNil - } - custom := i.(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - if data == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_custom_bytes(p *Properties, base structPointer) (n int) { - n += len(p.tagcode) - i := structPointer_InterfaceRef(base, p.field, p.ctype) - if i == nil { - return 0 - } - custom := i.(Marshaler) - data, _ := custom.Marshal() - n += sizeRawBytes(data) - return -} - -func (o *Buffer) enc_custom_ref_bytes(p *Properties, base structPointer) error { - custom := structPointer_InterfaceAt(base, p.field, p.ctype).(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - if data == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_custom_ref_bytes(p *Properties, base structPointer) (n int) { - n += len(p.tagcode) - i := structPointer_InterfaceAt(base, p.field, p.ctype) - if i == nil { - return 0 - } - custom := i.(Marshaler) - data, _ := custom.Marshal() - n += sizeRawBytes(data) - return -} - -func (o *Buffer) enc_custom_slice_bytes(p *Properties, base structPointer) error { - inter := structPointer_InterfaceRef(base, p.field, p.ctype) - if inter == nil { - return ErrNil - } - slice := reflect.ValueOf(inter) - l := slice.Len() - for i := 0; i < l; i++ { - v := slice.Index(i) - custom := v.Interface().(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} - -func size_custom_slice_bytes(p *Properties, base structPointer) (n int) { - inter := structPointer_InterfaceRef(base, p.field, p.ctype) - if inter == nil { - return 0 - } - slice := reflect.ValueOf(inter) - l := slice.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - v := slice.Index(i) - custom := v.Interface().(Marshaler) - data, _ := custom.Marshal() - n += sizeRawBytes(data) - } - return -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/equal.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/equal.go deleted file mode 100644 index d8673a3e9..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/equal.go +++ /dev/null @@ -1,256 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. -// TODO: MessageSet. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal (a "bytes" field, - although represented by []byte, is not a repeated field) - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. 
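-// (equalStruct compares exported fields one by one; the generated XXX_
-// bookkeeping fields are skipped below, with extensions and unrecognized
-// bytes each getting their own dedicated comparison.)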
-func equalStruct(v1, v2 reflect.Value) bool { - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - b1, ok := f1.Interface().(raw) - if ok { - b2 := f2.Interface().(raw) - // RawMessage - if !bytes.Equal(b1.Bytes(), b2.Bytes()) { - return false - } - continue - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - if !bytes.Equal(u1, u2) { - return false - } - - return true -} - -// v1 and v2 are known to have the same type. -func equalAny(v1, v2 reflect.Value) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2) { - return false - } - } - return true - case reflect.Ptr: - return equalAny(v1.Elem(), v2.Elem()) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i)) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// em1 and em2 are extension maps. -func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. 
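- // (Comparing the raw bytes would be too strict: equal values need not
- // share one canonical encoding, so both sides are brought to decoded
- // form before equalAny runs.)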
- var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - continue - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { - return false - } - } - - return true -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/equal_test.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/equal_test.go deleted file mode 100644 index ef6048008..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/equal_test.go +++ /dev/null @@ -1,191 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "testing" - - . "github.com/gogo/protobuf/proto" - pb "github.com/gogo/protobuf/proto/testdata" -) - -// Four identical base messages. -// The init function adds extensions to some of them. -var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)} -var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)} -var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)} -var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)} - -// Two messages with non-message extensions. -var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)} -var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)} - -func init() { - ext1 := &pb.Ext{Data: String("Kirk")} - ext2 := &pb.Ext{Data: String("Picard")} - - // messageWithExtension1a has ext1, but never marshals it. 
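- // (1a keeps its extension decoded in memory; 1b below is marshaled and
- // re-unmarshaled, so the same extension exists only in encoded form.
- // The "extension vs. same extension" case then has to match across the
- // two representations.)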
- if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil { - panic("SetExtension on 1a failed: " + err.Error()) - } - - // messageWithExtension1b is the unmarshaled form of messageWithExtension1a. - if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil { - panic("SetExtension on 1b failed: " + err.Error()) - } - buf, err := Marshal(messageWithExtension1b) - if err != nil { - panic("Marshal of 1b failed: " + err.Error()) - } - messageWithExtension1b.Reset() - if err := Unmarshal(buf, messageWithExtension1b); err != nil { - panic("Unmarshal of 1b failed: " + err.Error()) - } - - // messageWithExtension2 has ext2. - if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil { - panic("SetExtension on 2 failed: " + err.Error()) - } - - if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil { - panic("SetExtension on Int32-1 failed: " + err.Error()) - } - if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(24)); err != nil { - panic("SetExtension on Int32-2 failed: " + err.Error()) - } -} - -var EqualTests = []struct { - desc string - a, b Message - exp bool -}{ - {"different types", &pb.GoEnum{}, &pb.GoTestField{}, false}, - {"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true}, - {"nil vs nil", nil, nil, true}, - {"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true}, - {"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false}, - {"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false}, - - {"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false}, - {"one set field zero, one unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false}, - {"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false}, - {"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true}, - - {"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false}, - {"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false}, - {"repeated, different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false}, - {"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true}, - {"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true}, - {"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true}, - {"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true}, - - { - "nested, different", - &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}}, - &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}}, - false, - }, - { - "nested, equal", - &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, - &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, - true, - }, - - {"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true}, - {"bytes, empty", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true}, - {"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false}, - { - "repeated bytes", - &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}}, - &pb.MyMessage{RepBytes: 
[][]byte{[]byte("sham"), []byte("wow")}}, - true, - }, - - {"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false}, - {"extension vs. same extension", messageWithExtension1a, messageWithExtension1b, true}, - {"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false}, - - {"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true}, - {"int32 extension vs. a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false}, - - { - "message with group", - &pb.MyMessage{ - Count: Int32(1), - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: Int32(5), - }, - }, - &pb.MyMessage{ - Count: Int32(1), - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: Int32(5), - }, - }, - true, - }, - - { - "map same", - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - true, - }, - { - "map different entry", - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - &pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob"}}, - false, - }, - { - "map different key only", - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - &pb.MessageWithMap{NameMapping: map[int32]string{2: "Ken"}}, - false, - }, - { - "map different value only", - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob"}}, - false, - }, -} - -func TestEqual(t *testing.T) { - for _, tc := range EqualTests { - if res := Equal(tc.a, tc.b); res != tc.exp { - t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp) - } - } -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions.go deleted file mode 100644 index 9a6374fdb..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions.go +++ /dev/null @@ -1,519 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange -} - -type extensionsMap interface { - extendableProto - ExtensionMap() map[int32]Extension -} - -type extensionsBytes interface { - extendableProto - GetExtensions() *[]byte -} - -var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base extendableProto, id int32, b []byte) { - if ebase, ok := base.(extensionsMap); ok { - ebase.ExtensionMap()[id] = Extension{enc: b} - } else if ebase, ok := base.(extensionsBytes); ok { - clearExtension(base, id) - ext := ebase.GetExtensions() - *ext = append(*ext, b...) - } else { - panic("unreachable") - } -} - -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - // Check the extended type. 
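- // (ExtendedType is a nil pointer of the extended message type, so a
- // plain reflect.TypeOf comparison is enough to confirm that the
- // extension really targets pb's concrete type.)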
- if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b { - return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m. -func encodeExtensionMap(m map[int32]Extension) error { - for k, e := range m { - err := encodeExtension(&e) - if err != nil { - return err - } - m[k] = e - } - return nil -} - -func encodeExtension(e *Extension) error { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - return nil - } - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - return nil -} - -func sizeExtensionMap(m map[int32]Extension) (n int) { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - n += props.size(props, toStructPointer(x)) - } - return -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb extendableProto, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? 
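- // (Two storage layouts are possible: a map[int32]Extension on the
- // message, or gogo's bytes mode, where every extension lives in one raw
- // []byte that must be scanned tag by tag, as the second branch does.)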
- if epb, doki := pb.(extensionsMap); doki { - _, ok := epb.ExtensionMap()[extension.Field] - return ok - } else if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - buf := *ext - o := 0 - for o < len(buf) { - tag, n := DecodeVarint(buf[o:]) - fieldNum := int32(tag >> 3) - if int32(fieldNum) == extension.Field { - return true - } - wireType := int(tag & 0x7) - o += n - l, err := size(buf[o:], wireType) - if err != nil { - return false - } - o += l - } - return false - } - panic("unreachable") -} - -func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { - ext := pb.GetExtensions() - for offset < len(*ext) { - tag, n1 := DecodeVarint((*ext)[offset:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - n2, err := size((*ext)[offset+n1:], wireType) - if err != nil { - panic(err) - } - newOffset := offset + n1 + n2 - if fieldNum == theFieldNum { - *ext = append((*ext)[:offset], (*ext)[newOffset:]...) - return offset - } - offset = newOffset - } - return -1 -} - -func clearExtension(pb extendableProto, fieldNum int32) { - if epb, doki := pb.(extensionsMap); doki { - delete(epb.ExtensionMap(), fieldNum) - } else if epb, doki := pb.(extensionsBytes); doki { - offset := 0 - for offset != -1 { - offset = deleteExtension(epb, fieldNum, offset) - } - } else { - panic("unreachable") - } -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb extendableProto, extension *ExtensionDesc) { - // TODO: Check types, field numbers, etc.? - clearExtension(pb, extension.Field) -} - -// GetExtension parses and returns the given extension of pb. -// If the extension is not present it returns ErrMissingExtension. -func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { - if err := checkExtensionTypes(pb, extension); err != nil { - return nil, err - } - - if epb, doki := pb.(extensionsMap); doki { - emap := epb.ExtensionMap() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil - } else if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - o := 0 - for o < len(*ext) { - tag, n := DecodeVarint((*ext)[o:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - l, err := size((*ext)[o+n:], wireType) - if err != nil { - return nil, err - } - if int32(fieldNum) == extension.Field { - v, err := decodeExtension((*ext)[o:o+n+l], extension) - if err != nil { - return nil, err - } - return v, nil - } - o += n + l - } - return defaultExtensionValue(extension) - } - panic("unreachable") -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. 
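-// (The cases handled below: no default at all yields ErrMissingExtension,
-// scalar defaults come back as plain values, pointer-typed extensions get
-// a freshly allocated *T, and enum defaults arrive as int32 and are
-// written via SetInt.)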
-func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - - t := reflect.TypeOf(extension.ExtensionType) - rep := extension.repeated() - - props := extensionProperties(extension) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. - value := reflect.New(t).Elem() - - for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { - return nil, err - } - - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { - return nil, err - } - - if !rep || o.index >= len(o.buf) { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, ok := pb.(extendableProto) - if !ok { - err = errors.New("proto: not an extendable proto") - return - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { - if err := checkExtensionTypes(pb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. 
- if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - return setExtension(pb, extension, value) -} - -func setExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { - if epb, doki := pb.(extensionsMap); doki { - epb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value} - } else if epb, doki := pb.(extensionsBytes); doki { - ClearExtension(pb, extension) - ext := epb.GetExtensions() - et := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - p := NewBuffer(nil) - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - *ext = append(*ext, p.buf...) - } - return nil -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions_gogo.go deleted file mode 100644 index bd55fb68b..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions_gogo.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
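The extension scanning loops above (HasExtension, deleteExtension, GetExtension) all lean on the same wire-format arithmetic: each encoded field starts with a varint key whose low three bits carry the wire type and whose remaining bits carry the field number. A minimal standalone sketch of that split; the helper name is illustrative, not part of this package:

```go
package main

import "fmt"

// decodeKey mirrors the `tag >> 3` / `tag & 0x7` arithmetic used by the
// extension scanning loops: field number in the high bits, wire type in
// the low three bits.
func decodeKey(tag uint64) (fieldNum int32, wireType int) {
	return int32(tag >> 3), int(tag & 0x7)
}

func main() {
	// 0x1a encodes field 3 with wire type 2 (length-delimited).
	f, w := decodeKey(0x1a)
	fmt.Println(f, w) // 3 2
}
```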
-
-package proto
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"reflect"
-	"sort"
-	"strings"
-)
-
-func GetBoolExtension(pb extendableProto, extension *ExtensionDesc, ifnotset bool) bool {
-	if reflect.ValueOf(pb).IsNil() {
-		return ifnotset
-	}
-	value, err := GetExtension(pb, extension)
-	if err != nil {
-		return ifnotset
-	}
-	if value == nil {
-		return ifnotset
-	}
-	if value.(*bool) == nil {
-		return ifnotset
-	}
-	return *(value.(*bool))
-}
-
-func (this *Extension) Equal(that *Extension) bool {
-	return bytes.Equal(this.enc, that.enc)
-}
-
-func SizeOfExtensionMap(m map[int32]Extension) (n int) {
-	return sizeExtensionMap(m)
-}
-
-type sortableMapElem struct {
-	field int32
-	ext   Extension
-}
-
-func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions {
-	s := make(sortableExtensions, 0, len(m))
-	for k, v := range m {
-		s = append(s, &sortableMapElem{field: k, ext: v})
-	}
-	return s
-}
-
-type sortableExtensions []*sortableMapElem
-
-func (this sortableExtensions) Len() int { return len(this) }
-
-func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] }
-
-func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field }
-
-func (this sortableExtensions) String() string {
-	sort.Sort(this)
-	ss := make([]string, len(this))
-	for i := range this {
-		ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext)
-	}
-	return "map[" + strings.Join(ss, ",") + "]"
-}
-
-func StringFromExtensionsMap(m map[int32]Extension) string {
-	return newSortableExtensionsFromMap(m).String()
-}
-
-func StringFromExtensionsBytes(ext []byte) string {
-	m, err := BytesToExtensionsMap(ext)
-	if err != nil {
-		panic(err)
-	}
-	return StringFromExtensionsMap(m)
-}
-
-func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
-	if err := encodeExtensionMap(m); err != nil {
-		return 0, err
-	}
-	keys := make([]int, 0, len(m))
-	for k := range m {
-		keys = append(keys, int(k))
-	}
-	sort.Ints(keys)
-	for _, k := range keys {
-		n += copy(data[n:], m[int32(k)].enc)
-	}
-	return n, nil
-}
-
-func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
-	if m[id].value == nil || m[id].desc == nil {
-		return m[id].enc, nil
-	}
-	if err := encodeExtensionMap(m); err != nil {
-		return nil, err
-	}
-	return m[id].enc, nil
-}
-
-func size(buf []byte, wire int) (int, error) {
-	switch wire {
-	case WireVarint:
-		_, n := DecodeVarint(buf)
-		return n, nil
-	case WireFixed64:
-		return 8, nil
-	case WireBytes:
-		v, n := DecodeVarint(buf)
-		return int(v) + n, nil
-	case WireFixed32:
-		return 4, nil
-	case WireStartGroup:
-		offset := 0
-		for {
-			u, n := DecodeVarint(buf[offset:])
-			fwire := int(u & 0x7)
-			offset += n
-			if fwire == WireEndGroup {
-				return offset, nil
-			}
-			// Recurse on the nested field's own wire type, not the
-			// enclosing group's.
-			s, err := size(buf[offset:], fwire)
-			if err != nil {
-				return 0, err
-			}
-			offset += s
-		}
-	}
-	return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire)
-}
-
-func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) {
-	m := make(map[int32]Extension)
-	i := 0
-	for i < len(buf) {
-		tag, n := DecodeVarint(buf[i:])
-		if n <= 0 {
-			return nil, fmt.Errorf("unable to decode varint")
-		}
-		fieldNum := int32(tag >> 3)
-		wireType := int(tag & 0x7)
-		l, err := size(buf[i+n:], wireType)
-		if err != nil {
-			return nil, err
-		}
-		end := i + int(l) + n
-		m[int32(fieldNum)] = Extension{enc: buf[i:end]}
-		i = end
-	}
-	return m, nil
-}
-
-func NewExtension(e []byte) Extension {
-	ee := Extension{enc: make([]byte, len(e))}
-	copy(ee.enc, e)
-
return ee -} - -func (this Extension) GoString() string { - if this.enc == nil { - if err := encodeExtension(&this); err != nil { - panic(err) - } - } - return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) -} - -func SetUnsafeExtension(pb extendableProto, fieldNum int32, value interface{}) error { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return errors.New("proto: bad extension number; not in declared ranges") - } - return setExtension(pb, desc, value) -} - -func GetUnsafeExtension(pb extendableProto, fieldNum int32) (interface{}, error) { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return nil, fmt.Errorf("unregistered field number %d", fieldNum) - } - return GetExtension(pb, desc) -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions_test.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions_test.go deleted file mode 100644 index 86e3006d7..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions_test.go +++ /dev/null @@ -1,292 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
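The size helper above measures exactly one encoded field per call; for a WireBytes field that is the length varint plus the payload it announces. A worked sketch of that arithmetic under the assumption of a one-byte length varint, on hypothetical data not taken from the package:

```go
package main

import "fmt"

// bytesFieldLen mirrors the WireBytes case of size() for the common
// single-byte-varint case: payload length v plus the n bytes of the
// length varint itself.
func bytesFieldLen(buf []byte) int {
	v := int(buf[0]) // assumed one-byte length varint
	n := 1
	return v + n
}

func main() {
	body := append([]byte{0x05}, []byte("hello")...) // length 5, then payload
	fmt.Println(bytesFieldLen(body))                 // 6
}
```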
- -package proto_test - -import ( - "fmt" - "reflect" - "testing" - - "github.com/gogo/protobuf/proto" - pb "github.com/gogo/protobuf/proto/testdata" -) - -func TestGetExtensionsWithMissingExtensions(t *testing.T) { - msg := &pb.MyMessage{} - ext1 := &pb.Ext{} - if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { - t.Fatalf("Could not set ext1: %s", ext1) - } - exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{ - pb.E_Ext_More, - pb.E_Ext_Text, - }) - if err != nil { - t.Fatalf("GetExtensions() failed: %s", err) - } - if exts[0] != ext1 { - t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0]) - } - if exts[1] != nil { - t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1]) - } -} - -func TestGetExtensionStability(t *testing.T) { - check := func(m *pb.MyMessage) bool { - ext1, err := proto.GetExtension(m, pb.E_Ext_More) - if err != nil { - t.Fatalf("GetExtension() failed: %s", err) - } - ext2, err := proto.GetExtension(m, pb.E_Ext_More) - if err != nil { - t.Fatalf("GetExtension() failed: %s", err) - } - return ext1 == ext2 - } - msg := &pb.MyMessage{Count: proto.Int32(4)} - ext0 := &pb.Ext{} - if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil { - t.Fatalf("Could not set ext1: %s", ext0) - } - if !check(msg) { - t.Errorf("GetExtension() not stable before marshaling") - } - bb, err := proto.Marshal(msg) - if err != nil { - t.Fatalf("Marshal() failed: %s", err) - } - msg1 := &pb.MyMessage{} - err = proto.Unmarshal(bb, msg1) - if err != nil { - t.Fatalf("Unmarshal() failed: %s", err) - } - if !check(msg1) { - t.Errorf("GetExtension() not stable after unmarshaling") - } -} - -func TestGetExtensionDefaults(t *testing.T) { - var setFloat64 float64 = 1 - var setFloat32 float32 = 2 - var setInt32 int32 = 3 - var setInt64 int64 = 4 - var setUint32 uint32 = 5 - var setUint64 uint64 = 6 - var setBool = true - var setBool2 = false - var setString = "Goodnight string" - var setBytes = []byte("Goodnight bytes") - var setEnum = pb.DefaultsMessage_TWO - - type testcase struct { - ext *proto.ExtensionDesc // Extension we are testing. - want interface{} // Expected value of extension, or nil (meaning that GetExtension will fail). - def interface{} // Expected value of extension after ClearExtension(). 
-	}
-	tests := []testcase{
-		{pb.E_NoDefaultDouble, setFloat64, nil},
-		{pb.E_NoDefaultFloat, setFloat32, nil},
-		{pb.E_NoDefaultInt32, setInt32, nil},
-		{pb.E_NoDefaultInt64, setInt64, nil},
-		{pb.E_NoDefaultUint32, setUint32, nil},
-		{pb.E_NoDefaultUint64, setUint64, nil},
-		{pb.E_NoDefaultSint32, setInt32, nil},
-		{pb.E_NoDefaultSint64, setInt64, nil},
-		{pb.E_NoDefaultFixed32, setUint32, nil},
-		{pb.E_NoDefaultFixed64, setUint64, nil},
-		{pb.E_NoDefaultSfixed32, setInt32, nil},
-		{pb.E_NoDefaultSfixed64, setInt64, nil},
-		{pb.E_NoDefaultBool, setBool, nil},
-		{pb.E_NoDefaultBool, setBool2, nil},
-		{pb.E_NoDefaultString, setString, nil},
-		{pb.E_NoDefaultBytes, setBytes, nil},
-		{pb.E_NoDefaultEnum, setEnum, nil},
-		{pb.E_DefaultDouble, setFloat64, float64(3.1415)},
-		{pb.E_DefaultFloat, setFloat32, float32(3.14)},
-		{pb.E_DefaultInt32, setInt32, int32(42)},
-		{pb.E_DefaultInt64, setInt64, int64(43)},
-		{pb.E_DefaultUint32, setUint32, uint32(44)},
-		{pb.E_DefaultUint64, setUint64, uint64(45)},
-		{pb.E_DefaultSint32, setInt32, int32(46)},
-		{pb.E_DefaultSint64, setInt64, int64(47)},
-		{pb.E_DefaultFixed32, setUint32, uint32(48)},
-		{pb.E_DefaultFixed64, setUint64, uint64(49)},
-		{pb.E_DefaultSfixed32, setInt32, int32(50)},
-		{pb.E_DefaultSfixed64, setInt64, int64(51)},
-		{pb.E_DefaultBool, setBool, true},
-		{pb.E_DefaultBool, setBool2, true},
-		{pb.E_DefaultString, setString, "Hello, string"},
-		{pb.E_DefaultBytes, setBytes, []byte("Hello, bytes")},
-		{pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE},
-	}
-
-	checkVal := func(test testcase, msg *pb.DefaultsMessage, valWant interface{}) error {
-		val, err := proto.GetExtension(msg, test.ext)
-		if err != nil {
-			if valWant != nil {
-				return fmt.Errorf("GetExtension(): %s", err)
-			}
-			if want := proto.ErrMissingExtension; err != want {
-				return fmt.Errorf("Unexpected error: got %v, want %v", err, want)
-			}
-			return nil
-		}
-
-		// All proto2 extension values are either a pointer to a value or a slice of values.
-		ty := reflect.TypeOf(val)
-		tyWant := reflect.TypeOf(test.ext.ExtensionType)
-		if got, want := ty, tyWant; got != want {
-			return fmt.Errorf("unexpected reflect.TypeOf(): got %v want %v", got, want)
-		}
-		tye := ty.Elem()
-		tyeWant := tyWant.Elem()
-		if got, want := tye, tyeWant; got != want {
-			return fmt.Errorf("unexpected reflect.TypeOf().Elem(): got %v want %v", got, want)
-		}
-
-		// Check the name of the type of the value.
-		// If it is an enum it will be type int32 with the name of the enum.
-		if got, want := tye.Name(), tyeWant.Name(); got != want {
-			return fmt.Errorf("unexpected reflect.TypeOf().Elem().Name(): got %v want %v", got, want)
-		}
-
-		// Check that value is what we expect.
-		// If we have a pointer in val, get the value it points to.
-		valExp := val
-		if ty.Kind() == reflect.Ptr {
-			valExp = reflect.ValueOf(val).Elem().Interface()
-		}
-		if got, want := valExp, valWant; !reflect.DeepEqual(got, want) {
-			return fmt.Errorf("unexpected reflect.DeepEqual(): got %v want %v", got, want)
-		}
-
-		return nil
-	}
-
-	setTo := func(test testcase) interface{} {
-		setTo := reflect.ValueOf(test.want)
-		if typ := reflect.TypeOf(test.ext.ExtensionType); typ.Kind() == reflect.Ptr {
-			setTo = reflect.New(typ).Elem()
-			setTo.Set(reflect.New(setTo.Type().Elem()))
-			setTo.Elem().Set(reflect.ValueOf(test.want))
-		}
-		return setTo.Interface()
-	}
-
-	for _, test := range tests {
-		msg := &pb.DefaultsMessage{}
-		name := test.ext.Name
-
-		// Check the initial value.
-		if err := checkVal(test, msg, test.def); err != nil {
-			t.Errorf("%s: %v", name, err)
-		}
-
-		// Set the per-type value and check value.
-		name = fmt.Sprintf("%s (set to %T %v)", name, test.want, test.want)
-		if err := proto.SetExtension(msg, test.ext, setTo(test)); err != nil {
-			t.Errorf("%s: SetExtension(): %v", name, err)
-			continue
-		}
-		if err := checkVal(test, msg, test.want); err != nil {
-			t.Errorf("%s: %v", name, err)
-			continue
-		}
-
-		// Clear the extension and check that the default value is restored.
-		name += " (cleared)"
-		proto.ClearExtension(msg, test.ext)
-		if err := checkVal(test, msg, test.def); err != nil {
-			t.Errorf("%s: %v", name, err)
-		}
-	}
-}
-
-func TestExtensionsRoundTrip(t *testing.T) {
-	msg := &pb.MyMessage{}
-	ext1 := &pb.Ext{
-		Data: proto.String("hi"),
-	}
-	ext2 := &pb.Ext{
-		Data: proto.String("there"),
-	}
-	exists := proto.HasExtension(msg, pb.E_Ext_More)
-	if exists {
-		t.Error("Extension More present unexpectedly")
-	}
-	if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
-		t.Error(err)
-	}
-	if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil {
-		t.Error(err)
-	}
-	e, err := proto.GetExtension(msg, pb.E_Ext_More)
-	if err != nil {
-		t.Error(err)
-	}
-	x, ok := e.(*pb.Ext)
-	if !ok {
-		t.Errorf("e has type %T, expected testdata.Ext", e)
-	} else if *x.Data != "there" {
-		t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x)
-	}
-	proto.ClearExtension(msg, pb.E_Ext_More)
-	if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension {
-		t.Errorf("got %v, expected ErrMissingExtension", err)
-	}
-	if _, err := proto.GetExtension(msg, pb.E_X215); err == nil {
-		t.Error("expected bad extension error, got nil")
-	}
-	if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil {
-		t.Error("expected extension err")
-	}
-	if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil {
-		t.Error("expected some sort of type mismatch error, got nil")
-	}
-}
-
-func TestNilExtension(t *testing.T) {
-	msg := &pb.MyMessage{
-		Count: proto.Int32(1),
-	}
-	if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil {
-		t.Fatal(err)
-	}
-	if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil {
-		t.Error("expected SetExtension to fail due to a nil extension")
-	} else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want {
-		t.Errorf("expected error %v, got %v", want, err)
-	}
-	// Note: if the behavior of Marshal is ever changed to ignore nil extensions, update
-	// this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal.
-}
diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/lib.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/lib.go
deleted file mode 100644
index d36f9ad12..000000000
--- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/lib.go
+++ /dev/null
@@ -1,841 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -The simplest way to describe this is to see an example. 
-Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/gogo/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - -package main - - import ( - "log" - - "github.com/gogo/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. 
-type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // write point - - // pools of basic types to amortize allocation. - bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. -func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. 
-func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - break - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - if err != nil { - fmt.Printf("%3d: t=%3d start err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - if err != nil { - fmt.Printf("%3d: t=%3d end err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. 
-// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. - switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. 
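The scalar branch of setDefaults above fills an unset field by writing through the field's address as a **T. A minimal standalone illustration of that reflect pattern; the struct and field names here are hypothetical:

```go
package main

import (
	"fmt"
	"reflect"
)

type msg struct {
	Flag *bool // optional field: nil means unset
}

func main() {
	m := &msg{}
	f := reflect.ValueOf(m).Elem().Field(0) // the *bool field
	if f.IsNil() {
		b := new(bool)
		*b = true                    // the proto-declared default
		fptr := f.Addr().Interface() // **bool, as in setDefaults
		*(fptr.(**bool)) = b
	}
	fmt.Println(*m.Flag) // true
}
```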
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. -func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. 
-// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/lib_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/lib_gogo.go deleted file mode 100644 index a6c2c06b2..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/lib_gogo.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
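mapKeys above specializes its comparator by key kind so that map encoding is deterministic. The same idea in self-contained form, using sort.Sort as the package itself does; the sorter type here is illustrative:

```go
package main

import (
	"fmt"
	"reflect"
	"sort"
)

type byIntKey []reflect.Value

func (s byIntKey) Len() int           { return len(s) }
func (s byIntKey) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s byIntKey) Less(i, j int) bool { return s[i].Int() < s[j].Int() }

func main() {
	m := map[int64]string{3: "c", 1: "a", 2: "b"}
	keys := reflect.ValueOf(m).MapKeys() // iteration order is randomized
	sort.Sort(byIntKey(keys))            // numeric order, as mapKeys arranges
	for _, k := range keys {
		fmt.Println(k.Int(), m[k.Int()])
	}
}
```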
- -package proto - -import ( - "encoding/json" - "strconv" -) - -func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { - s, ok := m[value] - if !ok { - s = strconv.Itoa(int(value)) - } - return json.Marshal(s) -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/message_set.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/message_set.go deleted file mode 100644 index 9d912bce1..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/message_set.go +++ /dev/null @@ -1,287 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" -) - -// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var ErrNoMessageTypeId = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and MessageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. -// -// When a proto1 proto has a field that looks like: -// optional message info = 3; -// the protocol compiler produces a field in the generated struct that looks like: -// Info *_proto_.MessageSet `protobuf:"bytes,3,opt,name=info"` -// The package is automatically inserted so there is no need for that proto file to -// import this package. 
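The comment above pins down the MessageSet wire format; the unmarshaling code that follows has to rebuild each item's enclosing key from its type ID, which is plain key arithmetic: the field number shifted left three bits, ORed with the wire type. A small worked example (the constant mirrors WireBytes = 2):

```go
package main

import "fmt"

func main() {
	const wireBytes = 2 // length-delimited wire type
	id := uint64(12345)
	key := id<<3 | wireBytes            // the key UnmarshalMessageSet re-creates
	fmt.Printf("%d (0x%x)\n", key, key) // 98762 (0x181ca)
}
```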
- -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type MessageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure MessageSet is a Message. -var _ Message = (*MessageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *MessageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *MessageSet) Has(pb Message) bool { - if ms.find(pb) != nil { - return true - } - return false -} - -func (ms *MessageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return ErrNoMessageTypeId - } - return nil // TODO: return error instead? -} - -func (ms *MessageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return ErrNoMessageTypeId - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *MessageSet) Reset() { *ms = MessageSet{} } -func (ms *MessageSet) String() string { return CompactTextString(ms) } -func (*MessageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { - if err := encodeExtensionMap(m); err != nil { - return nil, err - } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. - ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. - msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { - ms := new(MessageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. 
- // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - if i > 0 { - b.WriteByte(',') - } - - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. - return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/message_set_test.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/message_set_test.go deleted file mode 100644 index 7c29bccf4..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/message_set_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "bytes" - "testing" -) - -func TestUnmarshalMessageSetWithDuplicate(t *testing.T) { - // Check that a repeated message set entry will be concatenated. - in := &MessageSet{ - Item: []*_MessageSet_Item{ - {TypeId: Int32(12345), Message: []byte("hoo")}, - {TypeId: Int32(12345), Message: []byte("hah")}, - }, - } - b, err := Marshal(in) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - t.Logf("Marshaled bytes: %q", b) - - m := make(map[int32]Extension) - if err := UnmarshalMessageSet(b, m); err != nil { - t.Fatalf("UnmarshalMessageSet: %v", err) - } - ext, ok := m[12345] - if !ok { - t.Fatalf("Didn't retrieve extension 12345; map is %v", m) - } - // Skip wire type/field number and length varints. - got := skipVarint(skipVarint(ext.enc)) - if want := []byte("hoohah"); !bytes.Equal(got, want) { - t.Errorf("Combined extension is %q, want %q", got, want) - } -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_reflect.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_reflect.go deleted file mode 100644 index 749919d25..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,479 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build appengine - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "math" - "reflect" -) - -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} - -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. - if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) -} - -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) -} - -// BoolSlice returns the address of a []bool field in the struct. 
-func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} -} - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { - v reflect.Value -} - -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) -} - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. -// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value -} - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() -} - -// Set sets p to point at a newly allocated word with bits set to x. 
-func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} -} - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value -} - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} -} - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. -type word32Slice struct { - v reflect.Value -} - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } -} - -func (p word32Slice) Len() int { - return p.v.Len() -} - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. 
-func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} -} - -// word64 is like word32 but for 64-bit values. -type word64 struct { - v reflect.Value -} - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") -} - -func word64_IsNil(p word64) bool { - return p.v.IsNil() -} - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} -} - -// word64Val is like word32Val but for 64-bit values. -type word64Val struct { - v reflect.Value -} - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) - return - } - panic("unreachable") -} - -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} -} - -type word64Slice struct { - v reflect.Value -} - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) - } -} - -func (p word64Slice) Len() int { - return p.v.Len() -} - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") -} - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_unsafe.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index e9be0fe92..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,266 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !appengine - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "unsafe" -) - -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. - -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. -func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != ^field(0) -} - -// Bytes returns the address of a []byte field in the struct. 
-func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil -} - -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] -} - -// Get gets the value pointed at by *v. 
-func word32_Get(p word32) uint32 { - return **p -} - -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x -} - -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p -} - -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Slice is a slice of 32-bit values. -type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// word64 is like word32 but for 64-bit values. -type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] -} - -func word64_IsNil(p word64) bool { - return *p == nil -} - -func word64_Get(p word64) uint64 { - return **p -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x -} - -func word64Val_Get(p word64Val) uint64 { - return *p -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 - -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } - -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go deleted file mode 100644 index 6bc85fa98..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !appengine - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "unsafe" -) - -func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { - point := unsafe.Pointer(uintptr(p) + uintptr(f)) - r := reflect.NewAt(t, point) - return r.Interface() -} - -func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { - point := unsafe.Pointer(uintptr(p) + uintptr(f)) - r := reflect.NewAt(t, point) - if r.Elem().IsNil() { - return nil - } - return r.Elem().Interface() -} - -func copyUintPtr(oldptr, newptr uintptr, size int) { - oldbytes := make([]byte, 0) - oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes)) - oldslice.Data = oldptr - oldslice.Len = size - oldslice.Cap = size - newbytes := make([]byte, 0) - newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes)) - newslice.Data = newptr - newslice.Len = size - newslice.Cap = size - copy(newbytes, oldbytes) -} - -func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { - copyUintPtr(uintptr(oldptr), uintptr(newptr), size) -} - -func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { - size := typ.Elem().Size() - oldHeader := structPointer_GetSliceHeader(base, f) - newLen := oldHeader.Len + 1 - slice := reflect.MakeSlice(typ, newLen, newLen) - bas := toStructPointer(slice) - for i := 0; i < oldHeader.Len; i++ { - newElemptr := uintptr(bas) + uintptr(i)*size - oldElemptr := oldHeader.Data + uintptr(i)*size - copyUintPtr(oldElemptr, newElemptr, int(size)) - } - - oldHeader.Data = uintptr(bas) - oldHeader.Len = newLen - oldHeader.Cap = newLen - - return structPointer(unsafe.Pointer(uintptr(unsafe.Pointer(bas)) + uintptr(uintptr(newLen-1)*size))) -} - -func structPointer_FieldPointer(p structPointer, f field) structPointer { - return structPointer(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { - return structPointer((*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { - return (*reflect.SliceHeader)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_Add(p structPointer, size field) structPointer { - return structPointer(unsafe.Pointer(uintptr(p) + uintptr(size))) -} - -func structPointer_Len(p structPointer, f field) int { - return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f)))) -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/properties.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/properties.go 
deleted file mode 100644 index 13245c00d..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/properties.go +++ /dev/null @@ -1,815 +0,0 @@ -// Extensions for Protocol Buffers to create more go like structures. -// -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. 
-type valueDecoder func(o *Buffer) (x uint64, err error) - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. -type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field; set for []byte only - - Default string // default value - HasDefault bool // whether an explicit default was provided - CustomType string - def_uint64 uint64 - - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sstype reflect.Type // set for slices of structs types only - ctype reflect.Type // set for custom types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool - - mtype reflect.Type // set for map types only - mkeyprop *Properties // set for map types only - mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder -} - -// String formats the properties in the protobuf struct field tag style. 
-func (p *Properties) String() string { - s := p.Wire - s = "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - if p.OrigName != p.Name { - s += ",name=" + p.OrigName - } - if p.proto3 { - s += ",proto3" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint - case "fixed32": - p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 - case "fixed64": - p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 - case "zigzag32": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 - case "zigzag64": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break - } - case strings.HasPrefix(f, "embedded="): - p.OrigName = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "customtype="): - p.CustomType = strings.Split(f, "=")[1] - } - } -} - -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// Initialize the fields for encoding and decoding. 
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil - if len(p.CustomType) > 0 { - p.setCustomEncAndDec(typ) - p.setTag(lockGetProp) - return - } - switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - } else { - p.enc = (*Buffer).enc_ref_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_ref_bool - } - case reflect.Int32: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - } else { - p.enc = (*Buffer).enc_ref_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_ref_int32 - } - case reflect.Uint32: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - } else { - p.enc = (*Buffer).enc_ref_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_ref_uint32 - } - case reflect.Int64, reflect.Uint64: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - } else { - p.enc = (*Buffer).enc_ref_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_ref_int64 - } - case reflect.Float32: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - } else { - p.enc = (*Buffer).enc_ref_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_ref_uint32 - } - case reflect.Float64: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - } else { - p.enc = (*Buffer).enc_ref_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_ref_int64 - } - case reflect.String: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - } else { - p.enc = (*Buffer).enc_ref_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_ref_string - } - case reflect.Struct: - p.stype = typ - p.isMarshaler = isMarshaler(typ) - p.isUnmarshaler = isUnmarshaler(typ) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_ref_struct_message - p.dec = (*Buffer).dec_ref_struct_message - p.size = size_ref_struct_message - } else { - fmt.Fprintf(os.Stderr, "proto: no coders for struct %T\n", typ) - } - - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: 
- p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: - p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } - } - - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_byte - p.dec = (*Buffer).dec_slice_byte - p.size = size_slice_byte - // This is a []byte, which is either a bytes field, - // or the value of a map field. In the latter case, - // we always encode an empty []byte, so we should not - // use the proto3 enc/size funcs. - // f == nil iff this is the key/value of a map field. 
- if p.proto3 && f != nil { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break - case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte - } - case reflect.Struct: - p.setSliceOfNonPointerStructs(t1) - } - - case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map - - p.mtype = t1 - p.mkeyprop = &Properties{} - p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.mvalprop = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - p.setTag(lockGetProp) -} - -func (p *Properties) setTag(lockGetProp bool) { - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() -) - -// isMarshaler reports whether type t implements Marshaler. -func isMarshaler(t reflect.Type) bool { - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - return t.Implements(unmarshalerType) -} - -// Init populates the properties from a protocol buffer struct tag. 
-func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if f != nil { - p.field = toField(f) - } - if tag == "" { - return - } - p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - if collectStats { - stats.Chit++ - } - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } - return prop - } - if collectStats { - stats.Cmiss++ - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) - prop.unrecField = invalidField - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - if f.Name == "XXX_extensions" { // special case - if len(f.Tag.Get("protobuf")) > 0 { - p.enc = (*Buffer).enc_ext_slice_byte - p.dec = nil // not needed - p.size = size_ext_slice_byte - } else { - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } - } - if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// Return the Properties object for the x[0]'th field of the structure. -func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. 
-func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) -var enumStringMaps = make(map[string]map[int32]string) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap - if _, ok := enumStringMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumStringMaps[typeName] = unusedNameMap -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/properties_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/properties_gogo.go deleted file mode 100644 index 8daf9f776..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/properties_gogo.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
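A minimal standalone sketch of the field-key precomputation done by setTag in the deleted properties.go above, assuming only the standard protobuf key layout key = (field_number << 3) | wire_type, varint-encoded; the tagcode helper below is illustrative and not part of this package:

package main

import "fmt"

// tagcode mirrors the tagbuf loop in setTag: emit the key as a base-128
// varint, low 7 bits first, with the continuation bit set on every byte
// except the last.
func tagcode(tag, wire uint32) []byte {
	x := tag<<3 | wire
	var buf []byte
	for x > 127 {
		buf = append(buf, 0x80|uint8(x&0x7F))
		x >>= 7
	}
	return append(buf, uint8(x))
}

func main() {
	// Field 1, wire type 2 ("bytes"): a single key byte 0x0a, the same
	// byte the proto3 round-trip test below annotates for Message.name.
	fmt.Printf("% x\n", tagcode(1, 2)) // 0a
	// Larger field numbers spill into a multi-byte varint, e.g. field 49:
	fmt.Printf("% x\n", tagcode(49, 2)) // 8a 03
}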
- -package proto - -import ( - "fmt" - "os" - "reflect" -) - -func (p *Properties) setCustomEncAndDec(typ reflect.Type) { - p.ctype = typ - if p.Repeated { - p.enc = (*Buffer).enc_custom_slice_bytes - p.dec = (*Buffer).dec_custom_slice_bytes - p.size = size_custom_slice_bytes - } else if typ.Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_custom_bytes - p.dec = (*Buffer).dec_custom_bytes - p.size = size_custom_bytes - } else { - p.enc = (*Buffer).enc_custom_ref_bytes - p.dec = (*Buffer).dec_custom_ref_bytes - p.size = size_custom_ref_bytes - } -} - -func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) { - t2 := typ.Elem() - p.sstype = typ - p.stype = t2 - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - p.enc = (*Buffer).enc_slice_ref_struct_message - p.dec = (*Buffer).dec_slice_ref_struct_message - p.size = size_slice_ref_struct_message - if p.Wire != "bytes" { - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T \n", typ, t2) - } -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/proto3_proto/proto3.pb.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/proto3_proto/proto3.pb.go deleted file mode 100644 index 2f2da4604..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/proto3_proto/proto3.pb.go +++ /dev/null @@ -1,122 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: proto3_proto/proto3.proto -// DO NOT EDIT! - -/* -Package proto3_proto is a generated protocol buffer package. - -It is generated from these files: - proto3_proto/proto3.proto - -It has these top-level messages: - Message - Nested - MessageWithMap -*/ -package proto3_proto - -import proto "github.com/gogo/protobuf/proto" -import testdata "github.com/gogo/protobuf/proto/testdata" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal - -type Message_Humour int32 - -const ( - Message_UNKNOWN Message_Humour = 0 - Message_PUNS Message_Humour = 1 - Message_SLAPSTICK Message_Humour = 2 - Message_BILL_BAILEY Message_Humour = 3 -) - -var Message_Humour_name = map[int32]string{ - 0: "UNKNOWN", - 1: "PUNS", - 2: "SLAPSTICK", - 3: "BILL_BAILEY", -} -var Message_Humour_value = map[string]int32{ - "UNKNOWN": 0, - "PUNS": 1, - "SLAPSTICK": 2, - "BILL_BAILEY": 3, -} - -func (x Message_Humour) String() string { - return proto.EnumName(Message_Humour_name, int32(x)) -} - -type Message struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,proto3,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` - HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm,proto3" json:"height_in_cm,omitempty"` - Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` - ResultCount int64 `protobuf:"varint,7,opt,name=result_count,proto3" json:"result_count,omitempty"` - TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman,proto3" json:"true_scotsman,omitempty"` - Score float32 `protobuf:"fixed32,9,opt,name=score,proto3" json:"score,omitempty"` - Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"` - Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"` - Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` - Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field" json:"proto2_field,omitempty"` - Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` -} - -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} - -func (m *Message) GetNested() *Nested { - if m != nil { - return m.Nested - } - return nil -} - -func (m *Message) GetTerrain() map[string]*Nested { - if m != nil { - return m.Terrain - } - return nil -} - -func (m *Message) GetProto2Field() *testdata.SubDefaults { - if m != nil { - return m.Proto2Field - } - return nil -} - -func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults { - if m != nil { - return m.Proto2Value - } - return nil -} - -type Nested struct { - Bunny string `protobuf:"bytes,1,opt,name=bunny,proto3" json:"bunny,omitempty"` -} - -func (m *Nested) Reset() { *m = Nested{} } -func (m *Nested) String() string { return proto.CompactTextString(m) } -func (*Nested) ProtoMessage() {} - -type MessageWithMap struct { - ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } -func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } -func (*MessageWithMap) ProtoMessage() {} - -func (m *MessageWithMap) GetByteMapping() map[bool][]byte { - if m != nil { - return m.ByteMapping - } - return nil -} - -func init() { - proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value) -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/proto3_proto/proto3.proto 
b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/proto3_proto/proto3.proto deleted file mode 100644 index ca670015a..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/proto3_proto/proto3.proto +++ /dev/null @@ -1,68 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package proto3_proto; - -import "github.com/gogo/protobuf/proto/testdata/test.proto"; - -message Message { - enum Humour { - UNKNOWN = 0; - PUNS = 1; - SLAPSTICK = 2; - BILL_BAILEY = 3; - } - - string name = 1; - Humour hilarity = 2; - uint32 height_in_cm = 3; - bytes data = 4; - int64 result_count = 7; - bool true_scotsman = 8; - float score = 9; - - repeated uint64 key = 5; - Nested nested = 6; - - map<string, Nested> terrain = 10; - testdata.SubDefaults proto2_field = 11; - map<string, testdata.SubDefaults> proto2_value = 13; -} - -message Nested { - string bunny = 1; -} - -message MessageWithMap { - map<bool, bytes> byte_mapping = 1; -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/proto3_test.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/proto3_test.go deleted file mode 100644 index 6f9cddc3f..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/proto3_test.go +++ /dev/null @@ -1,125 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution.
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "testing" - - "github.com/gogo/protobuf/proto" - pb "github.com/gogo/protobuf/proto/proto3_proto" - tpb "github.com/gogo/protobuf/proto/testdata" -) - -func TestProto3ZeroValues(t *testing.T) { - tests := []struct { - desc string - m proto.Message - }{ - {"zero message", &pb.Message{}}, - {"empty bytes field", &pb.Message{Data: []byte{}}}, - } - for _, test := range tests { - b, err := proto.Marshal(test.m) - if err != nil { - t.Errorf("%s: proto.Marshal: %v", test.desc, err) - continue - } - if len(b) > 0 { - t.Errorf("%s: Encoding is non-empty: %q", test.desc, b) - } - } -} - -func TestRoundTripProto3(t *testing.T) { - m := &pb.Message{ - Name: "David", // (2 | 1<<3): 0x0a 0x05 "David" - Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01 - HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01 - Data: []byte("roboto"), // (2 | 4<<3): 0x22 0x06 "roboto" - ResultCount: 47, // (0 | 7<<3): 0x38 0x2f - TrueScotsman: true, // (0 | 8<<3): 0x40 0x01 - Score: 8.1, // (5 | 9<<3): 0x4d <8.1> - - Key: []uint64{1, 0xdeadbeef}, - Nested: &pb.Nested{ - Bunny: "Monty", - }, - } - t.Logf(" m: %v", m) - - b, err := proto.Marshal(m) - if err != nil { - t.Fatalf("proto.Marshal: %v", err) - } - t.Logf(" b: %q", b) - - m2 := new(pb.Message) - if err := proto.Unmarshal(b, m2); err != nil { - t.Fatalf("proto.Unmarshal: %v", err) - } - t.Logf("m2: %v", m2) - - if !proto.Equal(m, m2) { - t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2) - } -} - -func TestProto3SetDefaults(t *testing.T) { - in := &pb.Message{ - Terrain: map[string]*pb.Nested{ - "meadow": new(pb.Nested), - }, - Proto2Field: new(tpb.SubDefaults), - Proto2Value: map[string]*tpb.SubDefaults{ - "badlands": new(tpb.SubDefaults), - }, - } - - got := proto.Clone(in).(*pb.Message) - proto.SetDefaults(got) - - // There are no defaults in proto3. Everything should be the zero value, but - // we need to remember to set defaults for nested proto2 messages.
- want := &pb.Message{ - Terrain: map[string]*pb.Nested{ - "meadow": new(pb.Nested), - }, - Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)}, - Proto2Value: map[string]*tpb.SubDefaults{ - "badlands": {N: proto.Int64(7)}, - }, - } - - if !proto.Equal(got, want) { - t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want) - } -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/size2_test.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/size2_test.go deleted file mode 100644 index a2729c39a..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/size2_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "testing" -) - -// This is a separate file and package from size_test.go because that one uses -// generated messages and thus may not be in package proto without having a circular -// dependency, whereas this file tests unexported details of size.go. - -func TestVarintSize(t *testing.T) { - // Check the edge cases carefully. - testCases := []struct { - n uint64 - size int - }{ - {0, 1}, - {1, 1}, - {127, 1}, - {128, 2}, - {16383, 2}, - {16384, 3}, - {1<<63 - 1, 9}, - {1 << 63, 10}, - } - for _, tc := range testCases { - size := sizeVarint(tc.n) - if size != tc.size { - t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size) - } - } -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/size_test.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/size_test.go deleted file mode 100644 index 457a479eb..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/size_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. 
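The edge cases in TestVarintSize above follow from the varint encoding itself: each byte carries 7 payload bits, so n bytes cover values up to 1<<(7n) - 1, and 1<<63 needs the full 10 bytes. The unexported sizeVarint is what the test exercises; this standalone sketch reimplements the same rule for illustration only:

package main

import "fmt"

// varintSize returns how many bytes x occupies as a protobuf varint:
// one byte per 7 bits, so 127 fits in 1 byte, 128 needs 2, 16384 needs 3,
// and values with the top bit set need 10.
func varintSize(x uint64) int {
    n := 1
    for x >= 1<<7 {
        x >>= 7
        n++
    }
    return n
}

func main() {
    // The same boundary values the test table checks.
    for _, v := range []uint64{0, 1, 127, 128, 16383, 16384, 1<<63 - 1, 1 << 63} {
        fmt.Printf("varintSize(%d) = %d\n", v, varintSize(v))
    }
}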
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "log" - "strings" - "testing" - - . "github.com/gogo/protobuf/proto" - proto3pb "github.com/gogo/protobuf/proto/proto3_proto" - pb "github.com/gogo/protobuf/proto/testdata" -) - -var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} - -// messageWithExtension2 is in equal_test.go. -var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)} - -func init() { - if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil { - log.Panicf("SetExtension: %v", err) - } - if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil { - log.Panicf("SetExtension: %v", err) - } - - // Force messageWithExtension3 to have the extension encoded. - Marshal(messageWithExtension3) - -} - -var SizeTests = []struct { - desc string - pb Message -}{ - {"empty", &pb.OtherMessage{}}, - // Basic types. - {"bool", &pb.Defaults{F_Bool: Bool(true)}}, - {"int32", &pb.Defaults{F_Int32: Int32(12)}}, - {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}}, - {"small int64", &pb.Defaults{F_Int64: Int64(1)}}, - {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}}, - {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}}, - {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}}, - {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}}, - {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}}, - {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}}, - {"float", &pb.Defaults{F_Float: Float32(12.6)}}, - {"double", &pb.Defaults{F_Double: Float64(13.9)}}, - {"string", &pb.Defaults{F_String: String("niles")}}, - {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}}, - {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}}, - {"sint32", &pb.Defaults{F_Sint32: Int32(65)}}, - {"sint64", &pb.Defaults{F_Sint64: Int64(67)}}, - {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}}, - // Repeated. 
- {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}}, - {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}}, - {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}}, - {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}}, - {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}}, - {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{ - // Need enough large numbers to verify that the header is counting the number of bytes - // for the field, not the number of elements. - 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, - 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, - }}}, - {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}}, - {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}}, - // Nested. - {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}}, - {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}}, - // Other things. - {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}}, - {"extension (unencoded)", messageWithExtension1}, - {"extension (encoded)", messageWithExtension3}, - // proto3 message - {"proto3 empty", &proto3pb.Message{}}, - {"proto3 bool", &proto3pb.Message{TrueScotsman: true}}, - {"proto3 int64", &proto3pb.Message{ResultCount: 1}}, - {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}}, - {"proto3 float", &proto3pb.Message{Score: 12.6}}, - {"proto3 string", &proto3pb.Message{Name: "Snezana"}}, - {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}}, - {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}}, - {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, - {"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: {}}}}, - - {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}}, - {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: {F: Float64(2.0)}}}}, - {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}}, - {"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: {}}}}, - - {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}}, - {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}}, - {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}}, -} - -func TestSize(t *testing.T) { - for _, tc := range SizeTests { - size := Size(tc.pb) - b, err := Marshal(tc.pb) - if err != nil { - t.Errorf("%v: Marshal failed: %v", tc.desc, err) - continue - } - if size != len(b) { - t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b)) - t.Logf("%v: bytes: %#v", tc.desc, b) - } - } -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/skip_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/skip_gogo.go deleted file mode 100644 index 4fe7e0815..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/skip_gogo.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. 
-// http://github.com/gogo/protobuf/gogoproto -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "io" -) - -func Skip(data []byte) (n int, err error) { - l := len(data) - index := 0 - for index < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - index++ - if data[index-1] < 0x80 { - break - } - } - return index, nil - case 1: - index += 8 - return index, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - index += length - return index, nil - case 3: - for { - var innerWire uint64 - var start int = index - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := Skip(data[start:]) - if err != nil { - return 0, err - } - index = start + next - } - return index, nil - case 4: - return index, nil - case 5: - index += 4 - return index, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/Makefile b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/Makefile deleted file mode 100644 index 1e676c37f..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. 
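The Skip function above is driven entirely by the low three bits of each field key, key = fieldNumber<<3 | wireType: wire type 0 skips a varint, 1 skips 8 bytes, 2 skips a length-prefixed payload, 3 recurses until the matching end-group marker (4), and 5 skips 4 bytes. A small sketch of the key decomposition (splitKey is an illustrative name, not part of the package):

package main

import "fmt"

// splitKey decomposes a decoded field key into its field number and
// wire type, exactly as Skip does with wire>>3 and wire&0x7.
func splitKey(key uint64) (field uint64, wire int) {
    return key >> 3, int(key & 0x7)
}

func main() {
    // 0x22 is field 4 with wire type 2 (length-delimited), the key byte
    // noted for the Data field in the proto3 round-trip test earlier.
    f, w := splitKey(0x22)
    fmt.Printf("field=%d wire=%d\n", f, w) // field=4 wire=2
}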
-# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -all: regenerate - -regenerate: - go install github.com/gogo/protobuf/protoc-gen-gogo/version/protoc-min-version - protoc-min-version --version="3.0.0" --gogo_out=. test.proto - diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/golden_test.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/golden_test.go deleted file mode 100644 index 8e8451537..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/golden_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Verify that the compiler output for test.proto is unchanged. - -package testdata - -import ( - "crypto/sha1" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "testing" -) - -// sum returns in string form (for easy comparison) the SHA-1 hash of the named file. -func sum(t *testing.T, name string) string { - data, err := ioutil.ReadFile(name) - if err != nil { - t.Fatal(err) - } - t.Logf("sum(%q): length is %d", name, len(data)) - hash := sha1.New() - _, err = hash.Write(data) - if err != nil { - t.Fatal(err) - } - return fmt.Sprintf("% x", hash.Sum(nil)) -} - -func run(t *testing.T, name string, args ...string) { - cmd := exec.Command(name, args...) - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err := cmd.Run() - if err != nil { - t.Fatal(err) - } -} - -func TestGolden(t *testing.T) { - // Compute the original checksum. - goldenSum := sum(t, "test.pb.go") - // Run the proto compiler. - run(t, "protoc", "--gogo_out="+os.TempDir(), "test.proto") - newFile := filepath.Join(os.TempDir(), "test.pb.go") - defer os.Remove(newFile) - // Compute the new checksum. - newSum := sum(t, newFile) - // Verify - if newSum != goldenSum { - run(t, "diff", "-u", "test.pb.go", newFile) - t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go") - } -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/test.pb.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/test.pb.go deleted file mode 100644 index 8bc688c2f..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/test.pb.go +++ /dev/null @@ -1,2746 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: test.proto -// DO NOT EDIT! - -/* -Package testdata is a generated protocol buffer package. - -It is generated from these files: - test.proto - -It has these top-level messages: - GoEnum - GoTestField - GoTest - GoSkipTest - NonPackedTest - PackedTest - MaxTag - OldMessage - NewMessage - InnerMessage - OtherMessage - MyMessage - Ext - DefaultsMessage - MyMessageSet - Empty - MessageList - Strings - Defaults - SubDefaults - RepeatedEnum - MoreRepeated - GroupOld - GroupNew - FloatingPoint - MessageWithMap -*/ -package testdata - -import proto "github.com/gogo/protobuf/proto" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = math.Inf - -type FOO int32 - -const ( - FOO_FOO1 FOO = 1 -) - -var FOO_name = map[int32]string{ - 1: "FOO1", -} -var FOO_value = map[string]int32{ - "FOO1": 1, -} - -func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p -} -func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) -} -func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") - if err != nil { - return err - } - *x = FOO(value) - return nil -} - -// An enum, for completeness. 
-type GoTest_KIND int32 - -const ( - GoTest_VOID GoTest_KIND = 0 - // Basic types - GoTest_BOOL GoTest_KIND = 1 - GoTest_BYTES GoTest_KIND = 2 - GoTest_FINGERPRINT GoTest_KIND = 3 - GoTest_FLOAT GoTest_KIND = 4 - GoTest_INT GoTest_KIND = 5 - GoTest_STRING GoTest_KIND = 6 - GoTest_TIME GoTest_KIND = 7 - // Groupings - GoTest_TUPLE GoTest_KIND = 8 - GoTest_ARRAY GoTest_KIND = 9 - GoTest_MAP GoTest_KIND = 10 - // Table types - GoTest_TABLE GoTest_KIND = 11 - // Functions - GoTest_FUNCTION GoTest_KIND = 12 -) - -var GoTest_KIND_name = map[int32]string{ - 0: "VOID", - 1: "BOOL", - 2: "BYTES", - 3: "FINGERPRINT", - 4: "FLOAT", - 5: "INT", - 6: "STRING", - 7: "TIME", - 8: "TUPLE", - 9: "ARRAY", - 10: "MAP", - 11: "TABLE", - 12: "FUNCTION", -} -var GoTest_KIND_value = map[string]int32{ - "VOID": 0, - "BOOL": 1, - "BYTES": 2, - "FINGERPRINT": 3, - "FLOAT": 4, - "INT": 5, - "STRING": 6, - "TIME": 7, - "TUPLE": 8, - "ARRAY": 9, - "MAP": 10, - "TABLE": 11, - "FUNCTION": 12, -} - -func (x GoTest_KIND) Enum() *GoTest_KIND { - p := new(GoTest_KIND) - *p = x - return p -} -func (x GoTest_KIND) String() string { - return proto.EnumName(GoTest_KIND_name, int32(x)) -} -func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") - if err != nil { - return err - } - *x = GoTest_KIND(value) - return nil -} - -type MyMessage_Color int32 - -const ( - MyMessage_RED MyMessage_Color = 0 - MyMessage_GREEN MyMessage_Color = 1 - MyMessage_BLUE MyMessage_Color = 2 -) - -var MyMessage_Color_name = map[int32]string{ - 0: "RED", - 1: "GREEN", - 2: "BLUE", -} -var MyMessage_Color_value = map[string]int32{ - "RED": 0, - "GREEN": 1, - "BLUE": 2, -} - -func (x MyMessage_Color) Enum() *MyMessage_Color { - p := new(MyMessage_Color) - *p = x - return p -} -func (x MyMessage_Color) String() string { - return proto.EnumName(MyMessage_Color_name, int32(x)) -} -func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") - if err != nil { - return err - } - *x = MyMessage_Color(value) - return nil -} - -type DefaultsMessage_DefaultsEnum int32 - -const ( - DefaultsMessage_ZERO DefaultsMessage_DefaultsEnum = 0 - DefaultsMessage_ONE DefaultsMessage_DefaultsEnum = 1 - DefaultsMessage_TWO DefaultsMessage_DefaultsEnum = 2 -) - -var DefaultsMessage_DefaultsEnum_name = map[int32]string{ - 0: "ZERO", - 1: "ONE", - 2: "TWO", -} -var DefaultsMessage_DefaultsEnum_value = map[string]int32{ - "ZERO": 0, - "ONE": 1, - "TWO": 2, -} - -func (x DefaultsMessage_DefaultsEnum) Enum() *DefaultsMessage_DefaultsEnum { - p := new(DefaultsMessage_DefaultsEnum) - *p = x - return p -} -func (x DefaultsMessage_DefaultsEnum) String() string { - return proto.EnumName(DefaultsMessage_DefaultsEnum_name, int32(x)) -} -func (x *DefaultsMessage_DefaultsEnum) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(DefaultsMessage_DefaultsEnum_value, data, "DefaultsMessage_DefaultsEnum") - if err != nil { - return err - } - *x = DefaultsMessage_DefaultsEnum(value) - return nil -} - -type Defaults_Color int32 - -const ( - Defaults_RED Defaults_Color = 0 - Defaults_GREEN Defaults_Color = 1 - Defaults_BLUE Defaults_Color = 2 -) - -var Defaults_Color_name = map[int32]string{ - 0: "RED", - 1: "GREEN", - 2: "BLUE", -} -var Defaults_Color_value = map[string]int32{ - "RED": 0, - "GREEN": 1, - "BLUE": 2, -} - -func (x Defaults_Color) Enum() *Defaults_Color { - p := new(Defaults_Color) - *p = x - 
return p -} -func (x Defaults_Color) String() string { - return proto.EnumName(Defaults_Color_name, int32(x)) -} -func (x *Defaults_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") - if err != nil { - return err - } - *x = Defaults_Color(value) - return nil -} - -type RepeatedEnum_Color int32 - -const ( - RepeatedEnum_RED RepeatedEnum_Color = 1 -) - -var RepeatedEnum_Color_name = map[int32]string{ - 1: "RED", -} -var RepeatedEnum_Color_value = map[string]int32{ - "RED": 1, -} - -func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color { - p := new(RepeatedEnum_Color) - *p = x - return p -} -func (x RepeatedEnum_Color) String() string { - return proto.EnumName(RepeatedEnum_Color_name, int32(x)) -} -func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") - if err != nil { - return err - } - *x = RepeatedEnum_Color(value) - return nil -} - -type GoEnum struct { - Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoEnum) Reset() { *m = GoEnum{} } -func (m *GoEnum) String() string { return proto.CompactTextString(m) } -func (*GoEnum) ProtoMessage() {} - -func (m *GoEnum) GetFoo() FOO { - if m != nil && m.Foo != nil { - return *m.Foo - } - return FOO_FOO1 -} - -type GoTestField struct { - Label *string `protobuf:"bytes,1,req" json:"Label,omitempty"` - Type *string `protobuf:"bytes,2,req" json:"Type,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTestField) Reset() { *m = GoTestField{} } -func (m *GoTestField) String() string { return proto.CompactTextString(m) } -func (*GoTestField) ProtoMessage() {} - -func (m *GoTestField) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" -} - -func (m *GoTestField) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -type GoTest struct { - // Some typical parameters - Kind *GoTest_KIND `protobuf:"varint,1,req,enum=testdata.GoTest_KIND" json:"Kind,omitempty"` - Table *string `protobuf:"bytes,2,opt" json:"Table,omitempty"` - Param *int32 `protobuf:"varint,3,opt" json:"Param,omitempty"` - // Required, repeated and optional foreign fields. 
- RequiredField *GoTestField `protobuf:"bytes,4,req" json:"RequiredField,omitempty"` - RepeatedField []*GoTestField `protobuf:"bytes,5,rep" json:"RepeatedField,omitempty"` - OptionalField *GoTestField `protobuf:"bytes,6,opt" json:"OptionalField,omitempty"` - // Required fields of all basic types - F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required" json:"F_Bool_required,omitempty"` - F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required" json:"F_Int32_required,omitempty"` - F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required" json:"F_Int64_required,omitempty"` - F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required" json:"F_Fixed32_required,omitempty"` - F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required" json:"F_Fixed64_required,omitempty"` - F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required" json:"F_Uint32_required,omitempty"` - F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required" json:"F_Uint64_required,omitempty"` - F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required" json:"F_Float_required,omitempty"` - F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required" json:"F_Double_required,omitempty"` - F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required" json:"F_String_required,omitempty"` - F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required" json:"F_Bytes_required,omitempty"` - F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required" json:"F_Sint32_required,omitempty"` - F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required" json:"F_Sint64_required,omitempty"` - // Repeated fields of all basic types - F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated" json:"F_Bool_repeated,omitempty"` - F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated" json:"F_Int32_repeated,omitempty"` - F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated" json:"F_Int64_repeated,omitempty"` - F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated" json:"F_Fixed32_repeated,omitempty"` - F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated" json:"F_Fixed64_repeated,omitempty"` - F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated" json:"F_Uint32_repeated,omitempty"` - F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated" json:"F_Uint64_repeated,omitempty"` - F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated" json:"F_Float_repeated,omitempty"` - F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated" json:"F_Double_repeated,omitempty"` - F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated" json:"F_String_repeated,omitempty"` - F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated" json:"F_Bytes_repeated,omitempty"` - F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated" json:"F_Sint32_repeated,omitempty"` - F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated" json:"F_Sint64_repeated,omitempty"` - // Optional fields of all basic types - F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional" json:"F_Bool_optional,omitempty"` - F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional" json:"F_Int32_optional,omitempty"` - F_Int64Optional *int64 
`protobuf:"varint,32,opt,name=F_Int64_optional" json:"F_Int64_optional,omitempty"` - F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional" json:"F_Fixed32_optional,omitempty"` - F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional" json:"F_Fixed64_optional,omitempty"` - F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional" json:"F_Uint32_optional,omitempty"` - F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional" json:"F_Uint64_optional,omitempty"` - F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional" json:"F_Float_optional,omitempty"` - F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional" json:"F_Double_optional,omitempty"` - F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional" json:"F_String_optional,omitempty"` - F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional" json:"F_Bytes_optional,omitempty"` - F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional" json:"F_Sint32_optional,omitempty"` - F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional" json:"F_Sint64_optional,omitempty"` - // Default-valued fields of all basic types - F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,def=1" json:"F_Bool_defaulted,omitempty"` - F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,def=32" json:"F_Int32_defaulted,omitempty"` - F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,def=64" json:"F_Int64_defaulted,omitempty"` - F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` - F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` - F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` - F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` - F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,def=314159" json:"F_Float_defaulted,omitempty"` - F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,def=271828" json:"F_Double_defaulted,omitempty"` - F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` - F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` - F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` - F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` - // Packed repeated fields (no string or bytes). 
- F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed" json:"F_Bool_repeated_packed,omitempty"` - F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed" json:"F_Int32_repeated_packed,omitempty"` - F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed" json:"F_Int64_repeated_packed,omitempty"` - F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed" json:"F_Fixed32_repeated_packed,omitempty"` - F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed" json:"F_Fixed64_repeated_packed,omitempty"` - F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed" json:"F_Uint32_repeated_packed,omitempty"` - F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed" json:"F_Uint64_repeated_packed,omitempty"` - F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed" json:"F_Float_repeated_packed,omitempty"` - F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed" json:"F_Double_repeated_packed,omitempty"` - F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed" json:"F_Sint32_repeated_packed,omitempty"` - F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed" json:"F_Sint64_repeated_packed,omitempty"` - Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup" json:"requiredgroup,omitempty"` - Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup" json:"repeatedgroup,omitempty"` - Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest) Reset() { *m = GoTest{} } -func (m *GoTest) String() string { return proto.CompactTextString(m) } -func (*GoTest) ProtoMessage() {} - -const Default_GoTest_F_BoolDefaulted bool = true -const Default_GoTest_F_Int32Defaulted int32 = 32 -const Default_GoTest_F_Int64Defaulted int64 = 64 -const Default_GoTest_F_Fixed32Defaulted uint32 = 320 -const Default_GoTest_F_Fixed64Defaulted uint64 = 640 -const Default_GoTest_F_Uint32Defaulted uint32 = 3200 -const Default_GoTest_F_Uint64Defaulted uint64 = 6400 -const Default_GoTest_F_FloatDefaulted float32 = 314159 -const Default_GoTest_F_DoubleDefaulted float64 = 271828 -const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" - -var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") - -const Default_GoTest_F_Sint32Defaulted int32 = -32 -const Default_GoTest_F_Sint64Defaulted int64 = -64 - -func (m *GoTest) GetKind() GoTest_KIND { - if m != nil && m.Kind != nil { - return *m.Kind - } - return GoTest_VOID -} - -func (m *GoTest) GetTable() string { - if m != nil && m.Table != nil { - return *m.Table - } - return "" -} - -func (m *GoTest) GetParam() int32 { - if m != nil && m.Param != nil { - return *m.Param - } - return 0 -} - -func (m *GoTest) GetRequiredField() *GoTestField { - if m != nil { - return m.RequiredField - } - return nil -} - -func (m *GoTest) GetRepeatedField() []*GoTestField { - if m != nil { - return m.RepeatedField - } - return nil -} - -func (m *GoTest) GetOptionalField() *GoTestField { - if m != nil { - return m.OptionalField - } - return nil -} - -func (m *GoTest) GetF_BoolRequired() bool { - if m != nil && 
m.F_BoolRequired != nil { - return *m.F_BoolRequired - } - return false -} - -func (m *GoTest) GetF_Int32Required() int32 { - if m != nil && m.F_Int32Required != nil { - return *m.F_Int32Required - } - return 0 -} - -func (m *GoTest) GetF_Int64Required() int64 { - if m != nil && m.F_Int64Required != nil { - return *m.F_Int64Required - } - return 0 -} - -func (m *GoTest) GetF_Fixed32Required() uint32 { - if m != nil && m.F_Fixed32Required != nil { - return *m.F_Fixed32Required - } - return 0 -} - -func (m *GoTest) GetF_Fixed64Required() uint64 { - if m != nil && m.F_Fixed64Required != nil { - return *m.F_Fixed64Required - } - return 0 -} - -func (m *GoTest) GetF_Uint32Required() uint32 { - if m != nil && m.F_Uint32Required != nil { - return *m.F_Uint32Required - } - return 0 -} - -func (m *GoTest) GetF_Uint64Required() uint64 { - if m != nil && m.F_Uint64Required != nil { - return *m.F_Uint64Required - } - return 0 -} - -func (m *GoTest) GetF_FloatRequired() float32 { - if m != nil && m.F_FloatRequired != nil { - return *m.F_FloatRequired - } - return 0 -} - -func (m *GoTest) GetF_DoubleRequired() float64 { - if m != nil && m.F_DoubleRequired != nil { - return *m.F_DoubleRequired - } - return 0 -} - -func (m *GoTest) GetF_StringRequired() string { - if m != nil && m.F_StringRequired != nil { - return *m.F_StringRequired - } - return "" -} - -func (m *GoTest) GetF_BytesRequired() []byte { - if m != nil { - return m.F_BytesRequired - } - return nil -} - -func (m *GoTest) GetF_Sint32Required() int32 { - if m != nil && m.F_Sint32Required != nil { - return *m.F_Sint32Required - } - return 0 -} - -func (m *GoTest) GetF_Sint64Required() int64 { - if m != nil && m.F_Sint64Required != nil { - return *m.F_Sint64Required - } - return 0 -} - -func (m *GoTest) GetF_BoolRepeated() []bool { - if m != nil { - return m.F_BoolRepeated - } - return nil -} - -func (m *GoTest) GetF_Int32Repeated() []int32 { - if m != nil { - return m.F_Int32Repeated - } - return nil -} - -func (m *GoTest) GetF_Int64Repeated() []int64 { - if m != nil { - return m.F_Int64Repeated - } - return nil -} - -func (m *GoTest) GetF_Fixed32Repeated() []uint32 { - if m != nil { - return m.F_Fixed32Repeated - } - return nil -} - -func (m *GoTest) GetF_Fixed64Repeated() []uint64 { - if m != nil { - return m.F_Fixed64Repeated - } - return nil -} - -func (m *GoTest) GetF_Uint32Repeated() []uint32 { - if m != nil { - return m.F_Uint32Repeated - } - return nil -} - -func (m *GoTest) GetF_Uint64Repeated() []uint64 { - if m != nil { - return m.F_Uint64Repeated - } - return nil -} - -func (m *GoTest) GetF_FloatRepeated() []float32 { - if m != nil { - return m.F_FloatRepeated - } - return nil -} - -func (m *GoTest) GetF_DoubleRepeated() []float64 { - if m != nil { - return m.F_DoubleRepeated - } - return nil -} - -func (m *GoTest) GetF_StringRepeated() []string { - if m != nil { - return m.F_StringRepeated - } - return nil -} - -func (m *GoTest) GetF_BytesRepeated() [][]byte { - if m != nil { - return m.F_BytesRepeated - } - return nil -} - -func (m *GoTest) GetF_Sint32Repeated() []int32 { - if m != nil { - return m.F_Sint32Repeated - } - return nil -} - -func (m *GoTest) GetF_Sint64Repeated() []int64 { - if m != nil { - return m.F_Sint64Repeated - } - return nil -} - -func (m *GoTest) GetF_BoolOptional() bool { - if m != nil && m.F_BoolOptional != nil { - return *m.F_BoolOptional - } - return false -} - -func (m *GoTest) GetF_Int32Optional() int32 { - if m != nil && m.F_Int32Optional != nil { - return *m.F_Int32Optional - } - return 0 -} - -func (m 
*GoTest) GetF_Int64Optional() int64 { - if m != nil && m.F_Int64Optional != nil { - return *m.F_Int64Optional - } - return 0 -} - -func (m *GoTest) GetF_Fixed32Optional() uint32 { - if m != nil && m.F_Fixed32Optional != nil { - return *m.F_Fixed32Optional - } - return 0 -} - -func (m *GoTest) GetF_Fixed64Optional() uint64 { - if m != nil && m.F_Fixed64Optional != nil { - return *m.F_Fixed64Optional - } - return 0 -} - -func (m *GoTest) GetF_Uint32Optional() uint32 { - if m != nil && m.F_Uint32Optional != nil { - return *m.F_Uint32Optional - } - return 0 -} - -func (m *GoTest) GetF_Uint64Optional() uint64 { - if m != nil && m.F_Uint64Optional != nil { - return *m.F_Uint64Optional - } - return 0 -} - -func (m *GoTest) GetF_FloatOptional() float32 { - if m != nil && m.F_FloatOptional != nil { - return *m.F_FloatOptional - } - return 0 -} - -func (m *GoTest) GetF_DoubleOptional() float64 { - if m != nil && m.F_DoubleOptional != nil { - return *m.F_DoubleOptional - } - return 0 -} - -func (m *GoTest) GetF_StringOptional() string { - if m != nil && m.F_StringOptional != nil { - return *m.F_StringOptional - } - return "" -} - -func (m *GoTest) GetF_BytesOptional() []byte { - if m != nil { - return m.F_BytesOptional - } - return nil -} - -func (m *GoTest) GetF_Sint32Optional() int32 { - if m != nil && m.F_Sint32Optional != nil { - return *m.F_Sint32Optional - } - return 0 -} - -func (m *GoTest) GetF_Sint64Optional() int64 { - if m != nil && m.F_Sint64Optional != nil { - return *m.F_Sint64Optional - } - return 0 -} - -func (m *GoTest) GetF_BoolDefaulted() bool { - if m != nil && m.F_BoolDefaulted != nil { - return *m.F_BoolDefaulted - } - return Default_GoTest_F_BoolDefaulted -} - -func (m *GoTest) GetF_Int32Defaulted() int32 { - if m != nil && m.F_Int32Defaulted != nil { - return *m.F_Int32Defaulted - } - return Default_GoTest_F_Int32Defaulted -} - -func (m *GoTest) GetF_Int64Defaulted() int64 { - if m != nil && m.F_Int64Defaulted != nil { - return *m.F_Int64Defaulted - } - return Default_GoTest_F_Int64Defaulted -} - -func (m *GoTest) GetF_Fixed32Defaulted() uint32 { - if m != nil && m.F_Fixed32Defaulted != nil { - return *m.F_Fixed32Defaulted - } - return Default_GoTest_F_Fixed32Defaulted -} - -func (m *GoTest) GetF_Fixed64Defaulted() uint64 { - if m != nil && m.F_Fixed64Defaulted != nil { - return *m.F_Fixed64Defaulted - } - return Default_GoTest_F_Fixed64Defaulted -} - -func (m *GoTest) GetF_Uint32Defaulted() uint32 { - if m != nil && m.F_Uint32Defaulted != nil { - return *m.F_Uint32Defaulted - } - return Default_GoTest_F_Uint32Defaulted -} - -func (m *GoTest) GetF_Uint64Defaulted() uint64 { - if m != nil && m.F_Uint64Defaulted != nil { - return *m.F_Uint64Defaulted - } - return Default_GoTest_F_Uint64Defaulted -} - -func (m *GoTest) GetF_FloatDefaulted() float32 { - if m != nil && m.F_FloatDefaulted != nil { - return *m.F_FloatDefaulted - } - return Default_GoTest_F_FloatDefaulted -} - -func (m *GoTest) GetF_DoubleDefaulted() float64 { - if m != nil && m.F_DoubleDefaulted != nil { - return *m.F_DoubleDefaulted - } - return Default_GoTest_F_DoubleDefaulted -} - -func (m *GoTest) GetF_StringDefaulted() string { - if m != nil && m.F_StringDefaulted != nil { - return *m.F_StringDefaulted - } - return Default_GoTest_F_StringDefaulted -} - -func (m *GoTest) GetF_BytesDefaulted() []byte { - if m != nil && m.F_BytesDefaulted != nil { - return m.F_BytesDefaulted - } - return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) 
-} - -func (m *GoTest) GetF_Sint32Defaulted() int32 { - if m != nil && m.F_Sint32Defaulted != nil { - return *m.F_Sint32Defaulted - } - return Default_GoTest_F_Sint32Defaulted -} - -func (m *GoTest) GetF_Sint64Defaulted() int64 { - if m != nil && m.F_Sint64Defaulted != nil { - return *m.F_Sint64Defaulted - } - return Default_GoTest_F_Sint64Defaulted -} - -func (m *GoTest) GetF_BoolRepeatedPacked() []bool { - if m != nil { - return m.F_BoolRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { - if m != nil { - return m.F_Int32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { - if m != nil { - return m.F_Int64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { - if m != nil { - return m.F_Fixed32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { - if m != nil { - return m.F_Fixed64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { - if m != nil { - return m.F_Uint32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { - if m != nil { - return m.F_Uint64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_FloatRepeatedPacked() []float32 { - if m != nil { - return m.F_FloatRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { - if m != nil { - return m.F_DoubleRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { - if m != nil { - return m.F_Sint32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { - if m != nil { - return m.F_Sint64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { - if m != nil { - return m.Requiredgroup - } - return nil -} - -func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { - if m != nil { - return m.Repeatedgroup - } - return nil -} - -func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil -} - -// Required, repeated, and optional groups. 
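Before the group types below, one detail of the defaulted getters above is worth calling out: GetF_BytesDefaulted returns append([]byte(nil), Default_GoTest_F_BytesDefaulted...) rather than the default slice itself, so a caller cannot mutate the shared default. A self-contained illustration of the aliasing hazard that copy avoids (all names here are hypothetical):

package main

import "fmt"

var sharedDefault = []byte("Bignose")

// aliased hands back the package-level default directly; any write
// through the returned slice corrupts it for every future caller.
func aliased() []byte { return sharedDefault }

// copied mimics the generated getter: a fresh slice on each call.
func copied() []byte { return append([]byte(nil), sharedDefault...) }

func main() {
    aliased()[0] = 'X'
    fmt.Println(string(sharedDefault)) // "Xignose": the default was corrupted

    sharedDefault = []byte("Bignose") // reset before the safe variant
    copied()[0] = 'X'
    fmt.Println(string(sharedDefault)) // "Bignose": the copy protected it
}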
-type GoTest_RequiredGroup struct { - RequiredField *string `protobuf:"bytes,71,req" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } -func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_RequiredGroup) ProtoMessage() {} - -func (m *GoTest_RequiredGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -type GoTest_RepeatedGroup struct { - RequiredField *string `protobuf:"bytes,81,req" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } -func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_RepeatedGroup) ProtoMessage() {} - -func (m *GoTest_RepeatedGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -type GoTest_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,91,req" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } -func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_OptionalGroup) ProtoMessage() {} - -func (m *GoTest_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -// For testing skipping of unrecognized fields. -// Numbers are all big, larger than tag numbers in GoTestField, -// the message used in the corresponding test. -type GoSkipTest struct { - SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32" json:"skip_int32,omitempty"` - SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32" json:"skip_fixed32,omitempty"` - SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64" json:"skip_fixed64,omitempty"` - SkipString *string `protobuf:"bytes,14,req,name=skip_string" json:"skip_string,omitempty"` - Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup" json:"skipgroup,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } -func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } -func (*GoSkipTest) ProtoMessage() {} - -func (m *GoSkipTest) GetSkipInt32() int32 { - if m != nil && m.SkipInt32 != nil { - return *m.SkipInt32 - } - return 0 -} - -func (m *GoSkipTest) GetSkipFixed32() uint32 { - if m != nil && m.SkipFixed32 != nil { - return *m.SkipFixed32 - } - return 0 -} - -func (m *GoSkipTest) GetSkipFixed64() uint64 { - if m != nil && m.SkipFixed64 != nil { - return *m.SkipFixed64 - } - return 0 -} - -func (m *GoSkipTest) GetSkipString() string { - if m != nil && m.SkipString != nil { - return *m.SkipString - } - return "" -} - -func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { - if m != nil { - return m.Skipgroup - } - return nil -} - -type GoSkipTest_SkipGroup struct { - GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32" json:"group_int32,omitempty"` - GroupString *string `protobuf:"bytes,17,req,name=group_string" json:"group_string,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } -func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } -func (*GoSkipTest_SkipGroup) ProtoMessage() {} - -func (m 
*GoSkipTest_SkipGroup) GetGroupInt32() int32 { - if m != nil && m.GroupInt32 != nil { - return *m.GroupInt32 - } - return 0 -} - -func (m *GoSkipTest_SkipGroup) GetGroupString() string { - if m != nil && m.GroupString != nil { - return *m.GroupString - } - return "" -} - -// For testing packed/non-packed decoder switching. -// A serialized instance of one should be deserializable as the other. -type NonPackedTest struct { - A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } -func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } -func (*NonPackedTest) ProtoMessage() {} - -func (m *NonPackedTest) GetA() []int32 { - if m != nil { - return m.A - } - return nil -} - -type PackedTest struct { - B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PackedTest) Reset() { *m = PackedTest{} } -func (m *PackedTest) String() string { return proto.CompactTextString(m) } -func (*PackedTest) ProtoMessage() {} - -func (m *PackedTest) GetB() []int32 { - if m != nil { - return m.B - } - return nil -} - -type MaxTag struct { - // Maximum possible tag number. - LastField *string `protobuf:"bytes,536870911,opt,name=last_field" json:"last_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MaxTag) Reset() { *m = MaxTag{} } -func (m *MaxTag) String() string { return proto.CompactTextString(m) } -func (*MaxTag) ProtoMessage() {} - -func (m *MaxTag) GetLastField() string { - if m != nil && m.LastField != nil { - return *m.LastField - } - return "" -} - -type OldMessage struct { - Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` - Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OldMessage) Reset() { *m = OldMessage{} } -func (m *OldMessage) String() string { return proto.CompactTextString(m) } -func (*OldMessage) ProtoMessage() {} - -func (m *OldMessage) GetNested() *OldMessage_Nested { - if m != nil { - return m.Nested - } - return nil -} - -func (m *OldMessage) GetNum() int32 { - if m != nil && m.Num != nil { - return *m.Num - } - return 0 -} - -type OldMessage_Nested struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } -func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } -func (*OldMessage_Nested) ProtoMessage() {} - -func (m *OldMessage_Nested) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -// NewMessage is wire compatible with OldMessage; -// imagine it as a future version. -type NewMessage struct { - Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` - // This is an int32 in OldMessage. 
- Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NewMessage) Reset() { *m = NewMessage{} } -func (m *NewMessage) String() string { return proto.CompactTextString(m) } -func (*NewMessage) ProtoMessage() {} - -func (m *NewMessage) GetNested() *NewMessage_Nested { - if m != nil { - return m.Nested - } - return nil -} - -func (m *NewMessage) GetNum() int64 { - if m != nil && m.Num != nil { - return *m.Num - } - return 0 -} - -type NewMessage_Nested struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - FoodGroup *string `protobuf:"bytes,2,opt,name=food_group" json:"food_group,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } -func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } -func (*NewMessage_Nested) ProtoMessage() {} - -func (m *NewMessage_Nested) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *NewMessage_Nested) GetFoodGroup() string { - if m != nil && m.FoodGroup != nil { - return *m.FoodGroup - } - return "" -} - -type InnerMessage struct { - Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"` - Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` - Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *InnerMessage) Reset() { *m = InnerMessage{} } -func (m *InnerMessage) String() string { return proto.CompactTextString(m) } -func (*InnerMessage) ProtoMessage() {} - -const Default_InnerMessage_Port int32 = 4000 - -func (m *InnerMessage) GetHost() string { - if m != nil && m.Host != nil { - return *m.Host - } - return "" -} - -func (m *InnerMessage) GetPort() int32 { - if m != nil && m.Port != nil { - return *m.Port - } - return Default_InnerMessage_Port -} - -func (m *InnerMessage) GetConnected() bool { - if m != nil && m.Connected != nil { - return *m.Connected - } - return false -} - -type OtherMessage struct { - Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` - Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OtherMessage) Reset() { *m = OtherMessage{} } -func (m *OtherMessage) String() string { return proto.CompactTextString(m) } -func (*OtherMessage) ProtoMessage() {} - -func (m *OtherMessage) GetKey() int64 { - if m != nil && m.Key != nil { - return *m.Key - } - return 0 -} - -func (m *OtherMessage) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *OtherMessage) GetWeight() float32 { - if m != nil && m.Weight != nil { - return *m.Weight - } - return 0 -} - -func (m *OtherMessage) GetInner() *InnerMessage { - if m != nil { - return m.Inner - } - return nil -} - -type MyMessage struct { - Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` - Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` - Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` - Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` - Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" 
json:"others,omitempty"` - RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner" json:"rep_inner,omitempty"` - Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` - Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup" json:"somegroup,omitempty"` - // This field becomes [][]byte in the generated code. - RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes" json:"rep_bytes,omitempty"` - Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessage) Reset() { *m = MyMessage{} } -func (m *MyMessage) String() string { return proto.CompactTextString(m) } -func (*MyMessage) ProtoMessage() {} - -var extRange_MyMessage = []proto.ExtensionRange{ - {100, 536870911}, -} - -func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MyMessage -} -func (m *MyMessage) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} - -func (m *MyMessage) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *MyMessage) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MyMessage) GetQuote() string { - if m != nil && m.Quote != nil { - return *m.Quote - } - return "" -} - -func (m *MyMessage) GetPet() []string { - if m != nil { - return m.Pet - } - return nil -} - -func (m *MyMessage) GetInner() *InnerMessage { - if m != nil { - return m.Inner - } - return nil -} - -func (m *MyMessage) GetOthers() []*OtherMessage { - if m != nil { - return m.Others - } - return nil -} - -func (m *MyMessage) GetRepInner() []*InnerMessage { - if m != nil { - return m.RepInner - } - return nil -} - -func (m *MyMessage) GetBikeshed() MyMessage_Color { - if m != nil && m.Bikeshed != nil { - return *m.Bikeshed - } - return MyMessage_RED -} - -func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { - if m != nil { - return m.Somegroup - } - return nil -} - -func (m *MyMessage) GetRepBytes() [][]byte { - if m != nil { - return m.RepBytes - } - return nil -} - -func (m *MyMessage) GetBigfloat() float64 { - if m != nil && m.Bigfloat != nil { - return *m.Bigfloat - } - return 0 -} - -type MyMessage_SomeGroup struct { - GroupField *int32 `protobuf:"varint,9,opt,name=group_field" json:"group_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } -func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } -func (*MyMessage_SomeGroup) ProtoMessage() {} - -func (m *MyMessage_SomeGroup) GetGroupField() int32 { - if m != nil && m.GroupField != nil { - return *m.GroupField - } - return 0 -} - -type Ext struct { - Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Ext) Reset() { *m = Ext{} } -func (m *Ext) String() string { return proto.CompactTextString(m) } -func (*Ext) ProtoMessage() {} - -func (m *Ext) GetData() string { - if m != nil && m.Data != nil { - return *m.Data - } - return "" -} - -var E_Ext_More = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: (*Ext)(nil), - Field: 103, - Name: "testdata.Ext.more", - Tag: "bytes,103,opt,name=more", -} - -var E_Ext_Text = &proto.ExtensionDesc{ - 
ExtendedType: (*MyMessage)(nil), - ExtensionType: (*string)(nil), - Field: 104, - Name: "testdata.Ext.text", - Tag: "bytes,104,opt,name=text", -} - -var E_Ext_Number = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 105, - Name: "testdata.Ext.number", - Tag: "varint,105,opt,name=number", -} - -type DefaultsMessage struct { - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} } -func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) } -func (*DefaultsMessage) ProtoMessage() {} - -var extRange_DefaultsMessage = []proto.ExtensionRange{ - {100, 536870911}, -} - -func (*DefaultsMessage) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_DefaultsMessage -} -func (m *DefaultsMessage) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} - -type MyMessageSet struct { - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } -func (m *MyMessageSet) String() string { return proto.CompactTextString(m) } -func (*MyMessageSet) ProtoMessage() {} - -func (m *MyMessageSet) Marshal() ([]byte, error) { - return proto.MarshalMessageSet(m.ExtensionMap()) -} -func (m *MyMessageSet) Unmarshal(buf []byte) error { - return proto.UnmarshalMessageSet(buf, m.ExtensionMap()) -} -func (m *MyMessageSet) MarshalJSON() ([]byte, error) { - return proto.MarshalMessageSetJSON(m.XXX_extensions) -} -func (m *MyMessageSet) UnmarshalJSON(buf []byte) error { - return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions) -} - -// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler -var _ proto.Marshaler = (*MyMessageSet)(nil) -var _ proto.Unmarshaler = (*MyMessageSet)(nil) - -var extRange_MyMessageSet = []proto.ExtensionRange{ - {100, 2147483646}, -} - -func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MyMessageSet -} -func (m *MyMessageSet) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} - -type Empty struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} - -type MessageList struct { - Message []*MessageList_Message `protobuf:"group,1,rep" json:"message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageList) Reset() { *m = MessageList{} } -func (m *MessageList) String() string { return proto.CompactTextString(m) } -func (*MessageList) ProtoMessage() {} - -func (m *MessageList) GetMessage() []*MessageList_Message { - if m != nil { - return m.Message - } - return nil -} - -type MessageList_Message struct { - Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` - Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } -func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } -func (*MessageList_Message) ProtoMessage() {} - -func (m *MessageList_Message) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m 
*MessageList_Message) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -type Strings struct { - StringField *string `protobuf:"bytes,1,opt,name=string_field" json:"string_field,omitempty"` - BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field" json:"bytes_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Strings) Reset() { *m = Strings{} } -func (m *Strings) String() string { return proto.CompactTextString(m) } -func (*Strings) ProtoMessage() {} - -func (m *Strings) GetStringField() string { - if m != nil && m.StringField != nil { - return *m.StringField - } - return "" -} - -func (m *Strings) GetBytesField() []byte { - if m != nil { - return m.BytesField - } - return nil -} - -type Defaults struct { - // Default-valued fields of all basic types. - // Same as GoTest, but copied here to make testing easier. - F_Bool *bool `protobuf:"varint,1,opt,def=1" json:"F_Bool,omitempty"` - F_Int32 *int32 `protobuf:"varint,2,opt,def=32" json:"F_Int32,omitempty"` - F_Int64 *int64 `protobuf:"varint,3,opt,def=64" json:"F_Int64,omitempty"` - F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,def=320" json:"F_Fixed32,omitempty"` - F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,def=640" json:"F_Fixed64,omitempty"` - F_Uint32 *uint32 `protobuf:"varint,6,opt,def=3200" json:"F_Uint32,omitempty"` - F_Uint64 *uint64 `protobuf:"varint,7,opt,def=6400" json:"F_Uint64,omitempty"` - F_Float *float32 `protobuf:"fixed32,8,opt,def=314159" json:"F_Float,omitempty"` - F_Double *float64 `protobuf:"fixed64,9,opt,def=271828" json:"F_Double,omitempty"` - F_String *string `protobuf:"bytes,10,opt,def=hello, \"world!\"\n" json:"F_String,omitempty"` - F_Bytes []byte `protobuf:"bytes,11,opt,def=Bignose" json:"F_Bytes,omitempty"` - F_Sint32 *int32 `protobuf:"zigzag32,12,opt,def=-32" json:"F_Sint32,omitempty"` - F_Sint64 *int64 `protobuf:"zigzag64,13,opt,def=-64" json:"F_Sint64,omitempty"` - F_Enum *Defaults_Color `protobuf:"varint,14,opt,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` - // More fields with crazy defaults. - F_Pinf *float32 `protobuf:"fixed32,15,opt,def=inf" json:"F_Pinf,omitempty"` - F_Ninf *float32 `protobuf:"fixed32,16,opt,def=-inf" json:"F_Ninf,omitempty"` - F_Nan *float32 `protobuf:"fixed32,17,opt,def=nan" json:"F_Nan,omitempty"` - // Sub-message. - Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` - // Redundant but explicit defaults. 
- StrZero *string `protobuf:"bytes,19,opt,name=str_zero,def=" json:"str_zero,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Defaults) Reset() { *m = Defaults{} } -func (m *Defaults) String() string { return proto.CompactTextString(m) } -func (*Defaults) ProtoMessage() {} - -const Default_Defaults_F_Bool bool = true -const Default_Defaults_F_Int32 int32 = 32 -const Default_Defaults_F_Int64 int64 = 64 -const Default_Defaults_F_Fixed32 uint32 = 320 -const Default_Defaults_F_Fixed64 uint64 = 640 -const Default_Defaults_F_Uint32 uint32 = 3200 -const Default_Defaults_F_Uint64 uint64 = 6400 -const Default_Defaults_F_Float float32 = 314159 -const Default_Defaults_F_Double float64 = 271828 -const Default_Defaults_F_String string = "hello, \"world!\"\n" - -var Default_Defaults_F_Bytes []byte = []byte("Bignose") - -const Default_Defaults_F_Sint32 int32 = -32 -const Default_Defaults_F_Sint64 int64 = -64 -const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN - -var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) -var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) -var Default_Defaults_F_Nan float32 = float32(math.NaN()) - -func (m *Defaults) GetF_Bool() bool { - if m != nil && m.F_Bool != nil { - return *m.F_Bool - } - return Default_Defaults_F_Bool -} - -func (m *Defaults) GetF_Int32() int32 { - if m != nil && m.F_Int32 != nil { - return *m.F_Int32 - } - return Default_Defaults_F_Int32 -} - -func (m *Defaults) GetF_Int64() int64 { - if m != nil && m.F_Int64 != nil { - return *m.F_Int64 - } - return Default_Defaults_F_Int64 -} - -func (m *Defaults) GetF_Fixed32() uint32 { - if m != nil && m.F_Fixed32 != nil { - return *m.F_Fixed32 - } - return Default_Defaults_F_Fixed32 -} - -func (m *Defaults) GetF_Fixed64() uint64 { - if m != nil && m.F_Fixed64 != nil { - return *m.F_Fixed64 - } - return Default_Defaults_F_Fixed64 -} - -func (m *Defaults) GetF_Uint32() uint32 { - if m != nil && m.F_Uint32 != nil { - return *m.F_Uint32 - } - return Default_Defaults_F_Uint32 -} - -func (m *Defaults) GetF_Uint64() uint64 { - if m != nil && m.F_Uint64 != nil { - return *m.F_Uint64 - } - return Default_Defaults_F_Uint64 -} - -func (m *Defaults) GetF_Float() float32 { - if m != nil && m.F_Float != nil { - return *m.F_Float - } - return Default_Defaults_F_Float -} - -func (m *Defaults) GetF_Double() float64 { - if m != nil && m.F_Double != nil { - return *m.F_Double - } - return Default_Defaults_F_Double -} - -func (m *Defaults) GetF_String() string { - if m != nil && m.F_String != nil { - return *m.F_String - } - return Default_Defaults_F_String -} - -func (m *Defaults) GetF_Bytes() []byte { - if m != nil && m.F_Bytes != nil { - return m.F_Bytes - } - return append([]byte(nil), Default_Defaults_F_Bytes...) 
-} - -func (m *Defaults) GetF_Sint32() int32 { - if m != nil && m.F_Sint32 != nil { - return *m.F_Sint32 - } - return Default_Defaults_F_Sint32 -} - -func (m *Defaults) GetF_Sint64() int64 { - if m != nil && m.F_Sint64 != nil { - return *m.F_Sint64 - } - return Default_Defaults_F_Sint64 -} - -func (m *Defaults) GetF_Enum() Defaults_Color { - if m != nil && m.F_Enum != nil { - return *m.F_Enum - } - return Default_Defaults_F_Enum -} - -func (m *Defaults) GetF_Pinf() float32 { - if m != nil && m.F_Pinf != nil { - return *m.F_Pinf - } - return Default_Defaults_F_Pinf -} - -func (m *Defaults) GetF_Ninf() float32 { - if m != nil && m.F_Ninf != nil { - return *m.F_Ninf - } - return Default_Defaults_F_Ninf -} - -func (m *Defaults) GetF_Nan() float32 { - if m != nil && m.F_Nan != nil { - return *m.F_Nan - } - return Default_Defaults_F_Nan -} - -func (m *Defaults) GetSub() *SubDefaults { - if m != nil { - return m.Sub - } - return nil -} - -func (m *Defaults) GetStrZero() string { - if m != nil && m.StrZero != nil { - return *m.StrZero - } - return "" -} - -type SubDefaults struct { - N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SubDefaults) Reset() { *m = SubDefaults{} } -func (m *SubDefaults) String() string { return proto.CompactTextString(m) } -func (*SubDefaults) ProtoMessage() {} - -const Default_SubDefaults_N int64 = 7 - -func (m *SubDefaults) GetN() int64 { - if m != nil && m.N != nil { - return *m.N - } - return Default_SubDefaults_N -} - -type RepeatedEnum struct { - Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } -func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } -func (*RepeatedEnum) ProtoMessage() {} - -func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { - if m != nil { - return m.Color - } - return nil -} - -type MoreRepeated struct { - Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` - BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed" json:"bools_packed,omitempty"` - Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` - IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed" json:"ints_packed,omitempty"` - Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed" json:"int64s_packed,omitempty"` - Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` - Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } -func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } -func (*MoreRepeated) ProtoMessage() {} - -func (m *MoreRepeated) GetBools() []bool { - if m != nil { - return m.Bools - } - return nil -} - -func (m *MoreRepeated) GetBoolsPacked() []bool { - if m != nil { - return m.BoolsPacked - } - return nil -} - -func (m *MoreRepeated) GetInts() []int32 { - if m != nil { - return m.Ints - } - return nil -} - -func (m *MoreRepeated) GetIntsPacked() []int32 { - if m != nil { - return m.IntsPacked - } - return nil -} - -func (m *MoreRepeated) GetInt64SPacked() []int64 { - if m != nil { - return m.Int64SPacked - } - return nil -} - -func (m *MoreRepeated) GetStrings() []string { - if m != nil { - return m.Strings - } - return nil -} - -func (m *MoreRepeated) 
GetFixeds() []uint32 { - if m != nil { - return m.Fixeds - } - return nil -} - -type GroupOld struct { - G *GroupOld_G `protobuf:"group,101,opt" json:"g,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupOld) Reset() { *m = GroupOld{} } -func (m *GroupOld) String() string { return proto.CompactTextString(m) } -func (*GroupOld) ProtoMessage() {} - -func (m *GroupOld) GetG() *GroupOld_G { - if m != nil { - return m.G - } - return nil -} - -type GroupOld_G struct { - X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } -func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } -func (*GroupOld_G) ProtoMessage() {} - -func (m *GroupOld_G) GetX() int32 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -type GroupNew struct { - G *GroupNew_G `protobuf:"group,101,opt" json:"g,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupNew) Reset() { *m = GroupNew{} } -func (m *GroupNew) String() string { return proto.CompactTextString(m) } -func (*GroupNew) ProtoMessage() {} - -func (m *GroupNew) GetG() *GroupNew_G { - if m != nil { - return m.G - } - return nil -} - -type GroupNew_G struct { - X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` - Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } -func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } -func (*GroupNew_G) ProtoMessage() {} - -func (m *GroupNew_G) GetX() int32 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -func (m *GroupNew_G) GetY() int32 { - if m != nil && m.Y != nil { - return *m.Y - } - return 0 -} - -type FloatingPoint struct { - F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } -func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } -func (*FloatingPoint) ProtoMessage() {} - -func (m *FloatingPoint) GetF() float64 { - if m != nil && m.F != nil { - return *m.F - } - return 0 -} - -type MessageWithMap struct { - NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } -func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } -func (*MessageWithMap) ProtoMessage() {} - -func (m *MessageWithMap) GetNameMapping() map[int32]string { - if m != nil { - return m.NameMapping - } - return nil -} - -func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint { - if m != nil { - return m.MsgMapping - } - return nil -} - -func (m *MessageWithMap) GetByteMapping() map[bool][]byte { - if m != nil { - return m.ByteMapping - } - 
return nil -} - -func (m *MessageWithMap) GetStrToStr() map[string]string { - if m != nil { - return m.StrToStr - } - return nil -} - -var E_Greeting = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: ([]string)(nil), - Field: 106, - Name: "testdata.greeting", - Tag: "bytes,106,rep,name=greeting", -} - -var E_NoDefaultDouble = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float64)(nil), - Field: 101, - Name: "testdata.no_default_double", - Tag: "fixed64,101,opt,name=no_default_double", -} - -var E_NoDefaultFloat = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float32)(nil), - Field: 102, - Name: "testdata.no_default_float", - Tag: "fixed32,102,opt,name=no_default_float", -} - -var E_NoDefaultInt32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 103, - Name: "testdata.no_default_int32", - Tag: "varint,103,opt,name=no_default_int32", -} - -var E_NoDefaultInt64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 104, - Name: "testdata.no_default_int64", - Tag: "varint,104,opt,name=no_default_int64", -} - -var E_NoDefaultUint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 105, - Name: "testdata.no_default_uint32", - Tag: "varint,105,opt,name=no_default_uint32", -} - -var E_NoDefaultUint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 106, - Name: "testdata.no_default_uint64", - Tag: "varint,106,opt,name=no_default_uint64", -} - -var E_NoDefaultSint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 107, - Name: "testdata.no_default_sint32", - Tag: "zigzag32,107,opt,name=no_default_sint32", -} - -var E_NoDefaultSint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 108, - Name: "testdata.no_default_sint64", - Tag: "zigzag64,108,opt,name=no_default_sint64", -} - -var E_NoDefaultFixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 109, - Name: "testdata.no_default_fixed32", - Tag: "fixed32,109,opt,name=no_default_fixed32", -} - -var E_NoDefaultFixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 110, - Name: "testdata.no_default_fixed64", - Tag: "fixed64,110,opt,name=no_default_fixed64", -} - -var E_NoDefaultSfixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 111, - Name: "testdata.no_default_sfixed32", - Tag: "fixed32,111,opt,name=no_default_sfixed32", -} - -var E_NoDefaultSfixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 112, - Name: "testdata.no_default_sfixed64", - Tag: "fixed64,112,opt,name=no_default_sfixed64", -} - -var E_NoDefaultBool = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 113, - Name: "testdata.no_default_bool", - Tag: "varint,113,opt,name=no_default_bool", -} - -var E_NoDefaultString = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*string)(nil), - Field: 114, - Name: "testdata.no_default_string", - Tag: "bytes,114,opt,name=no_default_string", -} - -var E_NoDefaultBytes = &proto.ExtensionDesc{ - 
ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: ([]byte)(nil), - Field: 115, - Name: "testdata.no_default_bytes", - Tag: "bytes,115,opt,name=no_default_bytes", -} - -var E_NoDefaultEnum = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), - Field: 116, - Name: "testdata.no_default_enum", - Tag: "varint,116,opt,name=no_default_enum,enum=testdata.DefaultsMessage_DefaultsEnum", -} - -var E_DefaultDouble = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float64)(nil), - Field: 201, - Name: "testdata.default_double", - Tag: "fixed64,201,opt,name=default_double,def=3.1415", -} - -var E_DefaultFloat = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float32)(nil), - Field: 202, - Name: "testdata.default_float", - Tag: "fixed32,202,opt,name=default_float,def=3.14", -} - -var E_DefaultInt32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 203, - Name: "testdata.default_int32", - Tag: "varint,203,opt,name=default_int32,def=42", -} - -var E_DefaultInt64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 204, - Name: "testdata.default_int64", - Tag: "varint,204,opt,name=default_int64,def=43", -} - -var E_DefaultUint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 205, - Name: "testdata.default_uint32", - Tag: "varint,205,opt,name=default_uint32,def=44", -} - -var E_DefaultUint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 206, - Name: "testdata.default_uint64", - Tag: "varint,206,opt,name=default_uint64,def=45", -} - -var E_DefaultSint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 207, - Name: "testdata.default_sint32", - Tag: "zigzag32,207,opt,name=default_sint32,def=46", -} - -var E_DefaultSint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 208, - Name: "testdata.default_sint64", - Tag: "zigzag64,208,opt,name=default_sint64,def=47", -} - -var E_DefaultFixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 209, - Name: "testdata.default_fixed32", - Tag: "fixed32,209,opt,name=default_fixed32,def=48", -} - -var E_DefaultFixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 210, - Name: "testdata.default_fixed64", - Tag: "fixed64,210,opt,name=default_fixed64,def=49", -} - -var E_DefaultSfixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 211, - Name: "testdata.default_sfixed32", - Tag: "fixed32,211,opt,name=default_sfixed32,def=50", -} - -var E_DefaultSfixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 212, - Name: "testdata.default_sfixed64", - Tag: "fixed64,212,opt,name=default_sfixed64,def=51", -} - -var E_DefaultBool = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 213, - Name: "testdata.default_bool", - Tag: "varint,213,opt,name=default_bool,def=1", -} - -var E_DefaultString = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*string)(nil), - Field: 214, - Name: 
"testdata.default_string", - Tag: "bytes,214,opt,name=default_string,def=Hello, string", -} - -var E_DefaultBytes = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: ([]byte)(nil), - Field: 215, - Name: "testdata.default_bytes", - Tag: "bytes,215,opt,name=default_bytes,def=Hello, bytes", -} - -var E_DefaultEnum = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), - Field: 216, - Name: "testdata.default_enum", - Tag: "varint,216,opt,name=default_enum,enum=testdata.DefaultsMessage_DefaultsEnum,def=1", -} - -var E_X201 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 201, - Name: "testdata.x201", - Tag: "bytes,201,opt,name=x201", -} - -var E_X202 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 202, - Name: "testdata.x202", - Tag: "bytes,202,opt,name=x202", -} - -var E_X203 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 203, - Name: "testdata.x203", - Tag: "bytes,203,opt,name=x203", -} - -var E_X204 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 204, - Name: "testdata.x204", - Tag: "bytes,204,opt,name=x204", -} - -var E_X205 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 205, - Name: "testdata.x205", - Tag: "bytes,205,opt,name=x205", -} - -var E_X206 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 206, - Name: "testdata.x206", - Tag: "bytes,206,opt,name=x206", -} - -var E_X207 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 207, - Name: "testdata.x207", - Tag: "bytes,207,opt,name=x207", -} - -var E_X208 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 208, - Name: "testdata.x208", - Tag: "bytes,208,opt,name=x208", -} - -var E_X209 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 209, - Name: "testdata.x209", - Tag: "bytes,209,opt,name=x209", -} - -var E_X210 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 210, - Name: "testdata.x210", - Tag: "bytes,210,opt,name=x210", -} - -var E_X211 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 211, - Name: "testdata.x211", - Tag: "bytes,211,opt,name=x211", -} - -var E_X212 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 212, - Name: "testdata.x212", - Tag: "bytes,212,opt,name=x212", -} - -var E_X213 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 213, - Name: "testdata.x213", - Tag: "bytes,213,opt,name=x213", -} - -var E_X214 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 214, - Name: "testdata.x214", - Tag: "bytes,214,opt,name=x214", -} - -var E_X215 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 215, - Name: "testdata.x215", - Tag: "bytes,215,opt,name=x215", -} - -var E_X216 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 216, - Name: "testdata.x216", - Tag: "bytes,216,opt,name=x216", -} - 
-var E_X217 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 217, - Name: "testdata.x217", - Tag: "bytes,217,opt,name=x217", -} - -var E_X218 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 218, - Name: "testdata.x218", - Tag: "bytes,218,opt,name=x218", -} - -var E_X219 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 219, - Name: "testdata.x219", - Tag: "bytes,219,opt,name=x219", -} - -var E_X220 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 220, - Name: "testdata.x220", - Tag: "bytes,220,opt,name=x220", -} - -var E_X221 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 221, - Name: "testdata.x221", - Tag: "bytes,221,opt,name=x221", -} - -var E_X222 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 222, - Name: "testdata.x222", - Tag: "bytes,222,opt,name=x222", -} - -var E_X223 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 223, - Name: "testdata.x223", - Tag: "bytes,223,opt,name=x223", -} - -var E_X224 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 224, - Name: "testdata.x224", - Tag: "bytes,224,opt,name=x224", -} - -var E_X225 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 225, - Name: "testdata.x225", - Tag: "bytes,225,opt,name=x225", -} - -var E_X226 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 226, - Name: "testdata.x226", - Tag: "bytes,226,opt,name=x226", -} - -var E_X227 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 227, - Name: "testdata.x227", - Tag: "bytes,227,opt,name=x227", -} - -var E_X228 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 228, - Name: "testdata.x228", - Tag: "bytes,228,opt,name=x228", -} - -var E_X229 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 229, - Name: "testdata.x229", - Tag: "bytes,229,opt,name=x229", -} - -var E_X230 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 230, - Name: "testdata.x230", - Tag: "bytes,230,opt,name=x230", -} - -var E_X231 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 231, - Name: "testdata.x231", - Tag: "bytes,231,opt,name=x231", -} - -var E_X232 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 232, - Name: "testdata.x232", - Tag: "bytes,232,opt,name=x232", -} - -var E_X233 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 233, - Name: "testdata.x233", - Tag: "bytes,233,opt,name=x233", -} - -var E_X234 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 234, - Name: "testdata.x234", - Tag: "bytes,234,opt,name=x234", -} - -var E_X235 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 235, - Name: "testdata.x235", - Tag: "bytes,235,opt,name=x235", -} - -var E_X236 = &proto.ExtensionDesc{ - ExtendedType: 
(*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 236, - Name: "testdata.x236", - Tag: "bytes,236,opt,name=x236", -} - -var E_X237 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 237, - Name: "testdata.x237", - Tag: "bytes,237,opt,name=x237", -} - -var E_X238 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 238, - Name: "testdata.x238", - Tag: "bytes,238,opt,name=x238", -} - -var E_X239 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 239, - Name: "testdata.x239", - Tag: "bytes,239,opt,name=x239", -} - -var E_X240 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 240, - Name: "testdata.x240", - Tag: "bytes,240,opt,name=x240", -} - -var E_X241 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 241, - Name: "testdata.x241", - Tag: "bytes,241,opt,name=x241", -} - -var E_X242 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 242, - Name: "testdata.x242", - Tag: "bytes,242,opt,name=x242", -} - -var E_X243 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 243, - Name: "testdata.x243", - Tag: "bytes,243,opt,name=x243", -} - -var E_X244 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 244, - Name: "testdata.x244", - Tag: "bytes,244,opt,name=x244", -} - -var E_X245 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 245, - Name: "testdata.x245", - Tag: "bytes,245,opt,name=x245", -} - -var E_X246 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 246, - Name: "testdata.x246", - Tag: "bytes,246,opt,name=x246", -} - -var E_X247 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 247, - Name: "testdata.x247", - Tag: "bytes,247,opt,name=x247", -} - -var E_X248 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 248, - Name: "testdata.x248", - Tag: "bytes,248,opt,name=x248", -} - -var E_X249 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 249, - Name: "testdata.x249", - Tag: "bytes,249,opt,name=x249", -} - -var E_X250 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 250, - Name: "testdata.x250", - Tag: "bytes,250,opt,name=x250", -} - -func init() { - proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value) - proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) - proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) - proto.RegisterEnum("testdata.DefaultsMessage_DefaultsEnum", DefaultsMessage_DefaultsEnum_name, DefaultsMessage_DefaultsEnum_value) - proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value) - proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value) - proto.RegisterExtension(E_Ext_More) - proto.RegisterExtension(E_Ext_Text) - proto.RegisterExtension(E_Ext_Number) - proto.RegisterExtension(E_Greeting) - proto.RegisterExtension(E_NoDefaultDouble) - proto.RegisterExtension(E_NoDefaultFloat) - 
-	proto.RegisterExtension(E_NoDefaultInt32)
-	proto.RegisterExtension(E_NoDefaultInt64)
-	proto.RegisterExtension(E_NoDefaultUint32)
-	proto.RegisterExtension(E_NoDefaultUint64)
-	proto.RegisterExtension(E_NoDefaultSint32)
-	proto.RegisterExtension(E_NoDefaultSint64)
-	proto.RegisterExtension(E_NoDefaultFixed32)
-	proto.RegisterExtension(E_NoDefaultFixed64)
-	proto.RegisterExtension(E_NoDefaultSfixed32)
-	proto.RegisterExtension(E_NoDefaultSfixed64)
-	proto.RegisterExtension(E_NoDefaultBool)
-	proto.RegisterExtension(E_NoDefaultString)
-	proto.RegisterExtension(E_NoDefaultBytes)
-	proto.RegisterExtension(E_NoDefaultEnum)
-	proto.RegisterExtension(E_DefaultDouble)
-	proto.RegisterExtension(E_DefaultFloat)
-	proto.RegisterExtension(E_DefaultInt32)
-	proto.RegisterExtension(E_DefaultInt64)
-	proto.RegisterExtension(E_DefaultUint32)
-	proto.RegisterExtension(E_DefaultUint64)
-	proto.RegisterExtension(E_DefaultSint32)
-	proto.RegisterExtension(E_DefaultSint64)
-	proto.RegisterExtension(E_DefaultFixed32)
-	proto.RegisterExtension(E_DefaultFixed64)
-	proto.RegisterExtension(E_DefaultSfixed32)
-	proto.RegisterExtension(E_DefaultSfixed64)
-	proto.RegisterExtension(E_DefaultBool)
-	proto.RegisterExtension(E_DefaultString)
-	proto.RegisterExtension(E_DefaultBytes)
-	proto.RegisterExtension(E_DefaultEnum)
-	proto.RegisterExtension(E_X201)
-	proto.RegisterExtension(E_X202)
-	proto.RegisterExtension(E_X203)
-	proto.RegisterExtension(E_X204)
-	proto.RegisterExtension(E_X205)
-	proto.RegisterExtension(E_X206)
-	proto.RegisterExtension(E_X207)
-	proto.RegisterExtension(E_X208)
-	proto.RegisterExtension(E_X209)
-	proto.RegisterExtension(E_X210)
-	proto.RegisterExtension(E_X211)
-	proto.RegisterExtension(E_X212)
-	proto.RegisterExtension(E_X213)
-	proto.RegisterExtension(E_X214)
-	proto.RegisterExtension(E_X215)
-	proto.RegisterExtension(E_X216)
-	proto.RegisterExtension(E_X217)
-	proto.RegisterExtension(E_X218)
-	proto.RegisterExtension(E_X219)
-	proto.RegisterExtension(E_X220)
-	proto.RegisterExtension(E_X221)
-	proto.RegisterExtension(E_X222)
-	proto.RegisterExtension(E_X223)
-	proto.RegisterExtension(E_X224)
-	proto.RegisterExtension(E_X225)
-	proto.RegisterExtension(E_X226)
-	proto.RegisterExtension(E_X227)
-	proto.RegisterExtension(E_X228)
-	proto.RegisterExtension(E_X229)
-	proto.RegisterExtension(E_X230)
-	proto.RegisterExtension(E_X231)
-	proto.RegisterExtension(E_X232)
-	proto.RegisterExtension(E_X233)
-	proto.RegisterExtension(E_X234)
-	proto.RegisterExtension(E_X235)
-	proto.RegisterExtension(E_X236)
-	proto.RegisterExtension(E_X237)
-	proto.RegisterExtension(E_X238)
-	proto.RegisterExtension(E_X239)
-	proto.RegisterExtension(E_X240)
-	proto.RegisterExtension(E_X241)
-	proto.RegisterExtension(E_X242)
-	proto.RegisterExtension(E_X243)
-	proto.RegisterExtension(E_X244)
-	proto.RegisterExtension(E_X245)
-	proto.RegisterExtension(E_X246)
-	proto.RegisterExtension(E_X247)
-	proto.RegisterExtension(E_X248)
-	proto.RegisterExtension(E_X249)
-	proto.RegisterExtension(E_X250)
-}
diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/test.pb.go.golden b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/test.pb.go.golden
deleted file mode 100644
index 0387853d5..000000000
--- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/test.pb.go.golden
+++ /dev/null
@@ -1,1737 +0,0 @@
-// Code generated by protoc-gen-gogo.
-// source: test.proto
-// DO NOT EDIT!
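The .golden file whose deletion begins here holds the expected protoc-gen-gogo output for test.proto, kept so generator changes can be diffed against a known-good copy; its content largely repeats the generated code above. One behavior these fixtures exercise is wire compatibility under schema evolution: NewMessage.Num is an int64 where OldMessage.Num is an int32, and both use varint encoding, so small values survive a cross-type round trip. A minimal sketch of that round trip follows; the package alias and example values are assumptions for illustration, not part of the deleted files.

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	pb "github.com/gogo/protobuf/proto/testdata" // the vendored package being deleted here
)

func main() {
	newMsg := &pb.NewMessage{
		Num: proto.Int64(7), // declared int32 in OldMessage; varint encoding is shared
		Nested: &pb.NewMessage_Nested{
			Name:      proto.String("Davy"),
			FoodGroup: proto.String("protein"), // field unknown to OldMessage_Nested
		},
	}
	buf, err := proto.Marshal(newMsg)
	if err != nil {
		panic(err)
	}

	// Decode the "future" message with the "old" schema.
	oldMsg := &pb.OldMessage{}
	if err := proto.Unmarshal(buf, oldMsg); err != nil {
		panic(err)
	}
	fmt.Println(oldMsg.GetNum())              // 7 — survives the int64-to-int32 read
	fmt.Println(oldMsg.GetNested().GetName()) // "Davy"

	// food_group is kept as raw unrecognized bytes on the nested message,
	// so re-marshalling oldMsg would preserve it for a newer reader.
	fmt.Println(len(oldMsg.GetNested().XXX_unrecognized) > 0) // true
}

The unrecognized-bytes behavior shown on the last line is the same property the "skipping of unrecognized fields" comment on GoSkipTest above refers to: unknown fields are skipped during decoding but retained for round-tripping.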
- -package testdata - -import proto "github.com/gogo/protobuf/proto" -import json "encoding/json" -import math "math" - -import () - -// Reference proto, json, and math imports to suppress error if they are not otherwise used. -var _ = proto.Marshal -var _ = &json.SyntaxError{} -var _ = math.Inf - -type FOO int32 - -const ( - FOO_FOO1 FOO = 1 -) - -var FOO_name = map[int32]string{ - 1: "FOO1", -} -var FOO_value = map[string]int32{ - "FOO1": 1, -} - -func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p -} -func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) -} -func (x FOO) MarshalJSON() ([]byte, error) { - return json.Marshal(x.String()) -} -func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") - if err != nil { - return err - } - *x = FOO(value) - return nil -} - -type GoTest_KIND int32 - -const ( - GoTest_VOID GoTest_KIND = 0 - GoTest_BOOL GoTest_KIND = 1 - GoTest_BYTES GoTest_KIND = 2 - GoTest_FINGERPRINT GoTest_KIND = 3 - GoTest_FLOAT GoTest_KIND = 4 - GoTest_INT GoTest_KIND = 5 - GoTest_STRING GoTest_KIND = 6 - GoTest_TIME GoTest_KIND = 7 - GoTest_TUPLE GoTest_KIND = 8 - GoTest_ARRAY GoTest_KIND = 9 - GoTest_MAP GoTest_KIND = 10 - GoTest_TABLE GoTest_KIND = 11 - GoTest_FUNCTION GoTest_KIND = 12 -) - -var GoTest_KIND_name = map[int32]string{ - 0: "VOID", - 1: "BOOL", - 2: "BYTES", - 3: "FINGERPRINT", - 4: "FLOAT", - 5: "INT", - 6: "STRING", - 7: "TIME", - 8: "TUPLE", - 9: "ARRAY", - 10: "MAP", - 11: "TABLE", - 12: "FUNCTION", -} -var GoTest_KIND_value = map[string]int32{ - "VOID": 0, - "BOOL": 1, - "BYTES": 2, - "FINGERPRINT": 3, - "FLOAT": 4, - "INT": 5, - "STRING": 6, - "TIME": 7, - "TUPLE": 8, - "ARRAY": 9, - "MAP": 10, - "TABLE": 11, - "FUNCTION": 12, -} - -func (x GoTest_KIND) Enum() *GoTest_KIND { - p := new(GoTest_KIND) - *p = x - return p -} -func (x GoTest_KIND) String() string { - return proto.EnumName(GoTest_KIND_name, int32(x)) -} -func (x GoTest_KIND) MarshalJSON() ([]byte, error) { - return json.Marshal(x.String()) -} -func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") - if err != nil { - return err - } - *x = GoTest_KIND(value) - return nil -} - -type MyMessage_Color int32 - -const ( - MyMessage_RED MyMessage_Color = 0 - MyMessage_GREEN MyMessage_Color = 1 - MyMessage_BLUE MyMessage_Color = 2 -) - -var MyMessage_Color_name = map[int32]string{ - 0: "RED", - 1: "GREEN", - 2: "BLUE", -} -var MyMessage_Color_value = map[string]int32{ - "RED": 0, - "GREEN": 1, - "BLUE": 2, -} - -func (x MyMessage_Color) Enum() *MyMessage_Color { - p := new(MyMessage_Color) - *p = x - return p -} -func (x MyMessage_Color) String() string { - return proto.EnumName(MyMessage_Color_name, int32(x)) -} -func (x MyMessage_Color) MarshalJSON() ([]byte, error) { - return json.Marshal(x.String()) -} -func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") - if err != nil { - return err - } - *x = MyMessage_Color(value) - return nil -} - -type Defaults_Color int32 - -const ( - Defaults_RED Defaults_Color = 0 - Defaults_GREEN Defaults_Color = 1 - Defaults_BLUE Defaults_Color = 2 -) - -var Defaults_Color_name = map[int32]string{ - 0: "RED", - 1: "GREEN", - 2: "BLUE", -} -var Defaults_Color_value = map[string]int32{ - "RED": 0, - "GREEN": 1, - "BLUE": 2, -} - -func (x Defaults_Color) Enum() *Defaults_Color { - p := new(Defaults_Color) - *p 
= x - return p -} -func (x Defaults_Color) String() string { - return proto.EnumName(Defaults_Color_name, int32(x)) -} -func (x Defaults_Color) MarshalJSON() ([]byte, error) { - return json.Marshal(x.String()) -} -func (x *Defaults_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") - if err != nil { - return err - } - *x = Defaults_Color(value) - return nil -} - -type RepeatedEnum_Color int32 - -const ( - RepeatedEnum_RED RepeatedEnum_Color = 1 -) - -var RepeatedEnum_Color_name = map[int32]string{ - 1: "RED", -} -var RepeatedEnum_Color_value = map[string]int32{ - "RED": 1, -} - -func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color { - p := new(RepeatedEnum_Color) - *p = x - return p -} -func (x RepeatedEnum_Color) String() string { - return proto.EnumName(RepeatedEnum_Color_name, int32(x)) -} -func (x RepeatedEnum_Color) MarshalJSON() ([]byte, error) { - return json.Marshal(x.String()) -} -func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") - if err != nil { - return err - } - *x = RepeatedEnum_Color(value) - return nil -} - -type GoEnum struct { - Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoEnum) Reset() { *m = GoEnum{} } -func (m *GoEnum) String() string { return proto.CompactTextString(m) } -func (*GoEnum) ProtoMessage() {} - -func (m *GoEnum) GetFoo() FOO { - if m != nil && m.Foo != nil { - return *m.Foo - } - return 0 -} - -type GoTestField struct { - Label *string `protobuf:"bytes,1,req" json:"Label,omitempty"` - Type *string `protobuf:"bytes,2,req" json:"Type,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTestField) Reset() { *m = GoTestField{} } -func (m *GoTestField) String() string { return proto.CompactTextString(m) } -func (*GoTestField) ProtoMessage() {} - -func (m *GoTestField) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" -} - -func (m *GoTestField) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -type GoTest struct { - Kind *GoTest_KIND `protobuf:"varint,1,req,enum=testdata.GoTest_KIND" json:"Kind,omitempty"` - Table *string `protobuf:"bytes,2,opt" json:"Table,omitempty"` - Param *int32 `protobuf:"varint,3,opt" json:"Param,omitempty"` - RequiredField *GoTestField `protobuf:"bytes,4,req" json:"RequiredField,omitempty"` - RepeatedField []*GoTestField `protobuf:"bytes,5,rep" json:"RepeatedField,omitempty"` - OptionalField *GoTestField `protobuf:"bytes,6,opt" json:"OptionalField,omitempty"` - F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required" json:"F_Bool_required,omitempty"` - F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required" json:"F_Int32_required,omitempty"` - F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required" json:"F_Int64_required,omitempty"` - F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required" json:"F_Fixed32_required,omitempty"` - F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required" json:"F_Fixed64_required,omitempty"` - F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required" json:"F_Uint32_required,omitempty"` - F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required" json:"F_Uint64_required,omitempty"` - F_FloatRequired *float32 
`protobuf:"fixed32,17,req,name=F_Float_required" json:"F_Float_required,omitempty"` - F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required" json:"F_Double_required,omitempty"` - F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required" json:"F_String_required,omitempty"` - F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required" json:"F_Bytes_required,omitempty"` - F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required" json:"F_Sint32_required,omitempty"` - F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required" json:"F_Sint64_required,omitempty"` - F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated" json:"F_Bool_repeated,omitempty"` - F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated" json:"F_Int32_repeated,omitempty"` - F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated" json:"F_Int64_repeated,omitempty"` - F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated" json:"F_Fixed32_repeated,omitempty"` - F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated" json:"F_Fixed64_repeated,omitempty"` - F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated" json:"F_Uint32_repeated,omitempty"` - F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated" json:"F_Uint64_repeated,omitempty"` - F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated" json:"F_Float_repeated,omitempty"` - F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated" json:"F_Double_repeated,omitempty"` - F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated" json:"F_String_repeated,omitempty"` - F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated" json:"F_Bytes_repeated,omitempty"` - F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated" json:"F_Sint32_repeated,omitempty"` - F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated" json:"F_Sint64_repeated,omitempty"` - F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional" json:"F_Bool_optional,omitempty"` - F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional" json:"F_Int32_optional,omitempty"` - F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional" json:"F_Int64_optional,omitempty"` - F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional" json:"F_Fixed32_optional,omitempty"` - F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional" json:"F_Fixed64_optional,omitempty"` - F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional" json:"F_Uint32_optional,omitempty"` - F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional" json:"F_Uint64_optional,omitempty"` - F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional" json:"F_Float_optional,omitempty"` - F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional" json:"F_Double_optional,omitempty"` - F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional" json:"F_String_optional,omitempty"` - F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional" json:"F_Bytes_optional,omitempty"` - F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional" json:"F_Sint32_optional,omitempty"` - F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional" json:"F_Sint64_optional,omitempty"` 
- F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,def=1" json:"F_Bool_defaulted,omitempty"` - F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,def=32" json:"F_Int32_defaulted,omitempty"` - F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,def=64" json:"F_Int64_defaulted,omitempty"` - F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` - F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` - F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` - F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` - F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,def=314159" json:"F_Float_defaulted,omitempty"` - F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,def=271828" json:"F_Double_defaulted,omitempty"` - F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` - F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` - F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` - F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` - F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed" json:"F_Bool_repeated_packed,omitempty"` - F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed" json:"F_Int32_repeated_packed,omitempty"` - F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed" json:"F_Int64_repeated_packed,omitempty"` - F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed" json:"F_Fixed32_repeated_packed,omitempty"` - F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed" json:"F_Fixed64_repeated_packed,omitempty"` - F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed" json:"F_Uint32_repeated_packed,omitempty"` - F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed" json:"F_Uint64_repeated_packed,omitempty"` - F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed" json:"F_Float_repeated_packed,omitempty"` - F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed" json:"F_Double_repeated_packed,omitempty"` - F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed" json:"F_Sint32_repeated_packed,omitempty"` - F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed" json:"F_Sint64_repeated_packed,omitempty"` - Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup" json:"requiredgroup,omitempty"` - Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup" json:"repeatedgroup,omitempty"` - Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m 
*GoTest) Reset() { *m = GoTest{} } -func (m *GoTest) String() string { return proto.CompactTextString(m) } -func (*GoTest) ProtoMessage() {} - -const Default_GoTest_F_BoolDefaulted bool = true -const Default_GoTest_F_Int32Defaulted int32 = 32 -const Default_GoTest_F_Int64Defaulted int64 = 64 -const Default_GoTest_F_Fixed32Defaulted uint32 = 320 -const Default_GoTest_F_Fixed64Defaulted uint64 = 640 -const Default_GoTest_F_Uint32Defaulted uint32 = 3200 -const Default_GoTest_F_Uint64Defaulted uint64 = 6400 -const Default_GoTest_F_FloatDefaulted float32 = 314159 -const Default_GoTest_F_DoubleDefaulted float64 = 271828 -const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" - -var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") - -const Default_GoTest_F_Sint32Defaulted int32 = -32 -const Default_GoTest_F_Sint64Defaulted int64 = -64 - -func (m *GoTest) GetKind() GoTest_KIND { - if m != nil && m.Kind != nil { - return *m.Kind - } - return 0 -} - -func (m *GoTest) GetTable() string { - if m != nil && m.Table != nil { - return *m.Table - } - return "" -} - -func (m *GoTest) GetParam() int32 { - if m != nil && m.Param != nil { - return *m.Param - } - return 0 -} - -func (m *GoTest) GetRequiredField() *GoTestField { - if m != nil { - return m.RequiredField - } - return nil -} - -func (m *GoTest) GetRepeatedField() []*GoTestField { - if m != nil { - return m.RepeatedField - } - return nil -} - -func (m *GoTest) GetOptionalField() *GoTestField { - if m != nil { - return m.OptionalField - } - return nil -} - -func (m *GoTest) GetF_BoolRequired() bool { - if m != nil && m.F_BoolRequired != nil { - return *m.F_BoolRequired - } - return false -} - -func (m *GoTest) GetF_Int32Required() int32 { - if m != nil && m.F_Int32Required != nil { - return *m.F_Int32Required - } - return 0 -} - -func (m *GoTest) GetF_Int64Required() int64 { - if m != nil && m.F_Int64Required != nil { - return *m.F_Int64Required - } - return 0 -} - -func (m *GoTest) GetF_Fixed32Required() uint32 { - if m != nil && m.F_Fixed32Required != nil { - return *m.F_Fixed32Required - } - return 0 -} - -func (m *GoTest) GetF_Fixed64Required() uint64 { - if m != nil && m.F_Fixed64Required != nil { - return *m.F_Fixed64Required - } - return 0 -} - -func (m *GoTest) GetF_Uint32Required() uint32 { - if m != nil && m.F_Uint32Required != nil { - return *m.F_Uint32Required - } - return 0 -} - -func (m *GoTest) GetF_Uint64Required() uint64 { - if m != nil && m.F_Uint64Required != nil { - return *m.F_Uint64Required - } - return 0 -} - -func (m *GoTest) GetF_FloatRequired() float32 { - if m != nil && m.F_FloatRequired != nil { - return *m.F_FloatRequired - } - return 0 -} - -func (m *GoTest) GetF_DoubleRequired() float64 { - if m != nil && m.F_DoubleRequired != nil { - return *m.F_DoubleRequired - } - return 0 -} - -func (m *GoTest) GetF_StringRequired() string { - if m != nil && m.F_StringRequired != nil { - return *m.F_StringRequired - } - return "" -} - -func (m *GoTest) GetF_BytesRequired() []byte { - if m != nil { - return m.F_BytesRequired - } - return nil -} - -func (m *GoTest) GetF_Sint32Required() int32 { - if m != nil && m.F_Sint32Required != nil { - return *m.F_Sint32Required - } - return 0 -} - -func (m *GoTest) GetF_Sint64Required() int64 { - if m != nil && m.F_Sint64Required != nil { - return *m.F_Sint64Required - } - return 0 -} - -func (m *GoTest) GetF_BoolRepeated() []bool { - if m != nil { - return m.F_BoolRepeated - } - return nil -} - -func (m *GoTest) GetF_Int32Repeated() []int32 { - if m != nil { - return 
m.F_Int32Repeated - } - return nil -} - -func (m *GoTest) GetF_Int64Repeated() []int64 { - if m != nil { - return m.F_Int64Repeated - } - return nil -} - -func (m *GoTest) GetF_Fixed32Repeated() []uint32 { - if m != nil { - return m.F_Fixed32Repeated - } - return nil -} - -func (m *GoTest) GetF_Fixed64Repeated() []uint64 { - if m != nil { - return m.F_Fixed64Repeated - } - return nil -} - -func (m *GoTest) GetF_Uint32Repeated() []uint32 { - if m != nil { - return m.F_Uint32Repeated - } - return nil -} - -func (m *GoTest) GetF_Uint64Repeated() []uint64 { - if m != nil { - return m.F_Uint64Repeated - } - return nil -} - -func (m *GoTest) GetF_FloatRepeated() []float32 { - if m != nil { - return m.F_FloatRepeated - } - return nil -} - -func (m *GoTest) GetF_DoubleRepeated() []float64 { - if m != nil { - return m.F_DoubleRepeated - } - return nil -} - -func (m *GoTest) GetF_StringRepeated() []string { - if m != nil { - return m.F_StringRepeated - } - return nil -} - -func (m *GoTest) GetF_BytesRepeated() [][]byte { - if m != nil { - return m.F_BytesRepeated - } - return nil -} - -func (m *GoTest) GetF_Sint32Repeated() []int32 { - if m != nil { - return m.F_Sint32Repeated - } - return nil -} - -func (m *GoTest) GetF_Sint64Repeated() []int64 { - if m != nil { - return m.F_Sint64Repeated - } - return nil -} - -func (m *GoTest) GetF_BoolOptional() bool { - if m != nil && m.F_BoolOptional != nil { - return *m.F_BoolOptional - } - return false -} - -func (m *GoTest) GetF_Int32Optional() int32 { - if m != nil && m.F_Int32Optional != nil { - return *m.F_Int32Optional - } - return 0 -} - -func (m *GoTest) GetF_Int64Optional() int64 { - if m != nil && m.F_Int64Optional != nil { - return *m.F_Int64Optional - } - return 0 -} - -func (m *GoTest) GetF_Fixed32Optional() uint32 { - if m != nil && m.F_Fixed32Optional != nil { - return *m.F_Fixed32Optional - } - return 0 -} - -func (m *GoTest) GetF_Fixed64Optional() uint64 { - if m != nil && m.F_Fixed64Optional != nil { - return *m.F_Fixed64Optional - } - return 0 -} - -func (m *GoTest) GetF_Uint32Optional() uint32 { - if m != nil && m.F_Uint32Optional != nil { - return *m.F_Uint32Optional - } - return 0 -} - -func (m *GoTest) GetF_Uint64Optional() uint64 { - if m != nil && m.F_Uint64Optional != nil { - return *m.F_Uint64Optional - } - return 0 -} - -func (m *GoTest) GetF_FloatOptional() float32 { - if m != nil && m.F_FloatOptional != nil { - return *m.F_FloatOptional - } - return 0 -} - -func (m *GoTest) GetF_DoubleOptional() float64 { - if m != nil && m.F_DoubleOptional != nil { - return *m.F_DoubleOptional - } - return 0 -} - -func (m *GoTest) GetF_StringOptional() string { - if m != nil && m.F_StringOptional != nil { - return *m.F_StringOptional - } - return "" -} - -func (m *GoTest) GetF_BytesOptional() []byte { - if m != nil { - return m.F_BytesOptional - } - return nil -} - -func (m *GoTest) GetF_Sint32Optional() int32 { - if m != nil && m.F_Sint32Optional != nil { - return *m.F_Sint32Optional - } - return 0 -} - -func (m *GoTest) GetF_Sint64Optional() int64 { - if m != nil && m.F_Sint64Optional != nil { - return *m.F_Sint64Optional - } - return 0 -} - -func (m *GoTest) GetF_BoolDefaulted() bool { - if m != nil && m.F_BoolDefaulted != nil { - return *m.F_BoolDefaulted - } - return Default_GoTest_F_BoolDefaulted -} - -func (m *GoTest) GetF_Int32Defaulted() int32 { - if m != nil && m.F_Int32Defaulted != nil { - return *m.F_Int32Defaulted - } - return Default_GoTest_F_Int32Defaulted -} - -func (m *GoTest) GetF_Int64Defaulted() int64 { - if m != nil && 
m.F_Int64Defaulted != nil { - return *m.F_Int64Defaulted - } - return Default_GoTest_F_Int64Defaulted -} - -func (m *GoTest) GetF_Fixed32Defaulted() uint32 { - if m != nil && m.F_Fixed32Defaulted != nil { - return *m.F_Fixed32Defaulted - } - return Default_GoTest_F_Fixed32Defaulted -} - -func (m *GoTest) GetF_Fixed64Defaulted() uint64 { - if m != nil && m.F_Fixed64Defaulted != nil { - return *m.F_Fixed64Defaulted - } - return Default_GoTest_F_Fixed64Defaulted -} - -func (m *GoTest) GetF_Uint32Defaulted() uint32 { - if m != nil && m.F_Uint32Defaulted != nil { - return *m.F_Uint32Defaulted - } - return Default_GoTest_F_Uint32Defaulted -} - -func (m *GoTest) GetF_Uint64Defaulted() uint64 { - if m != nil && m.F_Uint64Defaulted != nil { - return *m.F_Uint64Defaulted - } - return Default_GoTest_F_Uint64Defaulted -} - -func (m *GoTest) GetF_FloatDefaulted() float32 { - if m != nil && m.F_FloatDefaulted != nil { - return *m.F_FloatDefaulted - } - return Default_GoTest_F_FloatDefaulted -} - -func (m *GoTest) GetF_DoubleDefaulted() float64 { - if m != nil && m.F_DoubleDefaulted != nil { - return *m.F_DoubleDefaulted - } - return Default_GoTest_F_DoubleDefaulted -} - -func (m *GoTest) GetF_StringDefaulted() string { - if m != nil && m.F_StringDefaulted != nil { - return *m.F_StringDefaulted - } - return Default_GoTest_F_StringDefaulted -} - -func (m *GoTest) GetF_BytesDefaulted() []byte { - if m != nil && m.F_BytesDefaulted != nil { - return m.F_BytesDefaulted - } - return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) -} - -func (m *GoTest) GetF_Sint32Defaulted() int32 { - if m != nil && m.F_Sint32Defaulted != nil { - return *m.F_Sint32Defaulted - } - return Default_GoTest_F_Sint32Defaulted -} - -func (m *GoTest) GetF_Sint64Defaulted() int64 { - if m != nil && m.F_Sint64Defaulted != nil { - return *m.F_Sint64Defaulted - } - return Default_GoTest_F_Sint64Defaulted -} - -func (m *GoTest) GetF_BoolRepeatedPacked() []bool { - if m != nil { - return m.F_BoolRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { - if m != nil { - return m.F_Int32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { - if m != nil { - return m.F_Int64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { - if m != nil { - return m.F_Fixed32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { - if m != nil { - return m.F_Fixed64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { - if m != nil { - return m.F_Uint32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { - if m != nil { - return m.F_Uint64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_FloatRepeatedPacked() []float32 { - if m != nil { - return m.F_FloatRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { - if m != nil { - return m.F_DoubleRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { - if m != nil { - return m.F_Sint32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { - if m != nil { - return m.F_Sint64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { - if m != nil { - return m.Requiredgroup - } - return nil -} - -func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { - if m != nil { - return m.Repeatedgroup - } - return nil 
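These Get* accessors implement proto2 default-value semantics: a nil field pointer means "unset", and the getter falls back to the corresponding Default_GoTest_* constant, with the []byte default defensively copied because slices are mutable. A hedged usage sketch, assuming the vendored testdata package in this diff were still importable at the path below:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/proto/testdata"
)

func main() {
	m := &testdata.GoTest{}              // no defaulted fields set
	fmt.Println(m.GetF_Int32Defaulted()) // 32, from Default_GoTest_F_Int32Defaulted
	fmt.Println(m.GetF_BoolDefaulted())  // true

	// proto.Int32 is the stock helper for taking a scalar's address.
	m.F_Int32Defaulted = proto.Int32(7)
	fmt.Println(m.GetF_Int32Defaulted()) // 7

	// The bytes getter hands back a copy, so callers cannot corrupt
	// the shared Default_GoTest_F_BytesDefaulted slice.
	b := m.GetF_BytesDefaulted()
	b[0] = 'X'
	fmt.Println(string(m.GetF_BytesDefaulted())) // still "Bignose"
}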
-} - -func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil -} - -type GoTest_RequiredGroup struct { - RequiredField *string `protobuf:"bytes,71,req" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } - -func (m *GoTest_RequiredGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -type GoTest_RepeatedGroup struct { - RequiredField *string `protobuf:"bytes,81,req" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } - -func (m *GoTest_RepeatedGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -type GoTest_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,91,req" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } - -func (m *GoTest_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -type GoSkipTest struct { - SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32" json:"skip_int32,omitempty"` - SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32" json:"skip_fixed32,omitempty"` - SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64" json:"skip_fixed64,omitempty"` - SkipString *string `protobuf:"bytes,14,req,name=skip_string" json:"skip_string,omitempty"` - Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup" json:"skipgroup,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } -func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } -func (*GoSkipTest) ProtoMessage() {} - -func (m *GoSkipTest) GetSkipInt32() int32 { - if m != nil && m.SkipInt32 != nil { - return *m.SkipInt32 - } - return 0 -} - -func (m *GoSkipTest) GetSkipFixed32() uint32 { - if m != nil && m.SkipFixed32 != nil { - return *m.SkipFixed32 - } - return 0 -} - -func (m *GoSkipTest) GetSkipFixed64() uint64 { - if m != nil && m.SkipFixed64 != nil { - return *m.SkipFixed64 - } - return 0 -} - -func (m *GoSkipTest) GetSkipString() string { - if m != nil && m.SkipString != nil { - return *m.SkipString - } - return "" -} - -func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { - if m != nil { - return m.Skipgroup - } - return nil -} - -type GoSkipTest_SkipGroup struct { - GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32" json:"group_int32,omitempty"` - GroupString *string `protobuf:"bytes,17,req,name=group_string" json:"group_string,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } - -func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 { - if m != nil && m.GroupInt32 != nil { - return *m.GroupInt32 - } - return 0 -} - -func (m *GoSkipTest_SkipGroup) GetGroupString() string { - if m != nil && m.GroupString != nil { - return *m.GroupString - } - return "" -} - -type NonPackedTest struct { - A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } -func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } -func (*NonPackedTest) 
ProtoMessage() {} - -func (m *NonPackedTest) GetA() []int32 { - if m != nil { - return m.A - } - return nil -} - -type PackedTest struct { - B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PackedTest) Reset() { *m = PackedTest{} } -func (m *PackedTest) String() string { return proto.CompactTextString(m) } -func (*PackedTest) ProtoMessage() {} - -func (m *PackedTest) GetB() []int32 { - if m != nil { - return m.B - } - return nil -} - -type MaxTag struct { - LastField *string `protobuf:"bytes,536870911,opt,name=last_field" json:"last_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MaxTag) Reset() { *m = MaxTag{} } -func (m *MaxTag) String() string { return proto.CompactTextString(m) } -func (*MaxTag) ProtoMessage() {} - -func (m *MaxTag) GetLastField() string { - if m != nil && m.LastField != nil { - return *m.LastField - } - return "" -} - -type OldMessage struct { - Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OldMessage) Reset() { *m = OldMessage{} } -func (m *OldMessage) String() string { return proto.CompactTextString(m) } -func (*OldMessage) ProtoMessage() {} - -func (m *OldMessage) GetNested() *OldMessage_Nested { - if m != nil { - return m.Nested - } - return nil -} - -type OldMessage_Nested struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } -func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } -func (*OldMessage_Nested) ProtoMessage() {} - -func (m *OldMessage_Nested) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -type NewMessage struct { - Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NewMessage) Reset() { *m = NewMessage{} } -func (m *NewMessage) String() string { return proto.CompactTextString(m) } -func (*NewMessage) ProtoMessage() {} - -func (m *NewMessage) GetNested() *NewMessage_Nested { - if m != nil { - return m.Nested - } - return nil -} - -type NewMessage_Nested struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - FoodGroup *string `protobuf:"bytes,2,opt,name=food_group" json:"food_group,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } -func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } -func (*NewMessage_Nested) ProtoMessage() {} - -func (m *NewMessage_Nested) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *NewMessage_Nested) GetFoodGroup() string { - if m != nil && m.FoodGroup != nil { - return *m.FoodGroup - } - return "" -} - -type InnerMessage struct { - Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"` - Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` - Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *InnerMessage) Reset() { *m = InnerMessage{} } -func (m *InnerMessage) String() string { return proto.CompactTextString(m) } -func (*InnerMessage) ProtoMessage() {} - -const Default_InnerMessage_Port int32 = 4000 - -func (m *InnerMessage) GetHost() string { - if m != nil 
&& m.Host != nil { - return *m.Host - } - return "" -} - -func (m *InnerMessage) GetPort() int32 { - if m != nil && m.Port != nil { - return *m.Port - } - return Default_InnerMessage_Port -} - -func (m *InnerMessage) GetConnected() bool { - if m != nil && m.Connected != nil { - return *m.Connected - } - return false -} - -type OtherMessage struct { - Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` - Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OtherMessage) Reset() { *m = OtherMessage{} } -func (m *OtherMessage) String() string { return proto.CompactTextString(m) } -func (*OtherMessage) ProtoMessage() {} - -func (m *OtherMessage) GetKey() int64 { - if m != nil && m.Key != nil { - return *m.Key - } - return 0 -} - -func (m *OtherMessage) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *OtherMessage) GetWeight() float32 { - if m != nil && m.Weight != nil { - return *m.Weight - } - return 0 -} - -func (m *OtherMessage) GetInner() *InnerMessage { - if m != nil { - return m.Inner - } - return nil -} - -type MyMessage struct { - Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` - Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` - Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` - Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` - Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"` - Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` - Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup" json:"somegroup,omitempty"` - RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes" json:"rep_bytes,omitempty"` - Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessage) Reset() { *m = MyMessage{} } -func (m *MyMessage) String() string { return proto.CompactTextString(m) } -func (*MyMessage) ProtoMessage() {} - -var extRange_MyMessage = []proto.ExtensionRange{ - {100, 536870911}, -} - -func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MyMessage -} -func (m *MyMessage) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} - -func (m *MyMessage) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *MyMessage) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MyMessage) GetQuote() string { - if m != nil && m.Quote != nil { - return *m.Quote - } - return "" -} - -func (m *MyMessage) GetPet() []string { - if m != nil { - return m.Pet - } - return nil -} - -func (m *MyMessage) GetInner() *InnerMessage { - if m != nil { - return m.Inner - } - return nil -} - -func (m *MyMessage) GetOthers() []*OtherMessage { - if m != nil { - return m.Others - } - return nil -} - -func (m *MyMessage) GetBikeshed() MyMessage_Color { - if m != nil && m.Bikeshed != nil { - return 
*m.Bikeshed - } - return 0 -} - -func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { - if m != nil { - return m.Somegroup - } - return nil -} - -func (m *MyMessage) GetRepBytes() [][]byte { - if m != nil { - return m.RepBytes - } - return nil -} - -func (m *MyMessage) GetBigfloat() float64 { - if m != nil && m.Bigfloat != nil { - return *m.Bigfloat - } - return 0 -} - -type MyMessage_SomeGroup struct { - GroupField *int32 `protobuf:"varint,9,opt,name=group_field" json:"group_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } - -func (m *MyMessage_SomeGroup) GetGroupField() int32 { - if m != nil && m.GroupField != nil { - return *m.GroupField - } - return 0 -} - -type Ext struct { - Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Ext) Reset() { *m = Ext{} } -func (m *Ext) String() string { return proto.CompactTextString(m) } -func (*Ext) ProtoMessage() {} - -func (m *Ext) GetData() string { - if m != nil && m.Data != nil { - return *m.Data - } - return "" -} - -var E_Ext_More = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: (*Ext)(nil), - Field: 103, - Name: "testdata.Ext.more", - Tag: "bytes,103,opt,name=more", -} - -var E_Ext_Text = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: (*string)(nil), - Field: 104, - Name: "testdata.Ext.text", - Tag: "bytes,104,opt,name=text", -} - -var E_Ext_Number = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 105, - Name: "testdata.Ext.number", - Tag: "varint,105,opt,name=number", -} - -type MessageList struct { - Message []*MessageList_Message `protobuf:"group,1,rep" json:"message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageList) Reset() { *m = MessageList{} } -func (m *MessageList) String() string { return proto.CompactTextString(m) } -func (*MessageList) ProtoMessage() {} - -func (m *MessageList) GetMessage() []*MessageList_Message { - if m != nil { - return m.Message - } - return nil -} - -type MessageList_Message struct { - Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` - Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } - -func (m *MessageList_Message) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MessageList_Message) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -type Strings struct { - StringField *string `protobuf:"bytes,1,opt,name=string_field" json:"string_field,omitempty"` - BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field" json:"bytes_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Strings) Reset() { *m = Strings{} } -func (m *Strings) String() string { return proto.CompactTextString(m) } -func (*Strings) ProtoMessage() {} - -func (m *Strings) GetStringField() string { - if m != nil && m.StringField != nil { - return *m.StringField - } - return "" -} - -func (m *Strings) GetBytesField() []byte { - if m != nil { - return m.BytesField - } - return nil -} - -type Defaults struct { - F_Bool *bool `protobuf:"varint,1,opt,def=1" json:"F_Bool,omitempty"` - F_Int32 *int32 `protobuf:"varint,2,opt,def=32" json:"F_Int32,omitempty"` - F_Int64 *int64 `protobuf:"varint,3,opt,def=64" 
json:"F_Int64,omitempty"` - F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,def=320" json:"F_Fixed32,omitempty"` - F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,def=640" json:"F_Fixed64,omitempty"` - F_Uint32 *uint32 `protobuf:"varint,6,opt,def=3200" json:"F_Uint32,omitempty"` - F_Uint64 *uint64 `protobuf:"varint,7,opt,def=6400" json:"F_Uint64,omitempty"` - F_Float *float32 `protobuf:"fixed32,8,opt,def=314159" json:"F_Float,omitempty"` - F_Double *float64 `protobuf:"fixed64,9,opt,def=271828" json:"F_Double,omitempty"` - F_String *string `protobuf:"bytes,10,opt,def=hello, \"world!\"\n" json:"F_String,omitempty"` - F_Bytes []byte `protobuf:"bytes,11,opt,def=Bignose" json:"F_Bytes,omitempty"` - F_Sint32 *int32 `protobuf:"zigzag32,12,opt,def=-32" json:"F_Sint32,omitempty"` - F_Sint64 *int64 `protobuf:"zigzag64,13,opt,def=-64" json:"F_Sint64,omitempty"` - F_Enum *Defaults_Color `protobuf:"varint,14,opt,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` - F_Pinf *float32 `protobuf:"fixed32,15,opt,def=inf" json:"F_Pinf,omitempty"` - F_Ninf *float32 `protobuf:"fixed32,16,opt,def=-inf" json:"F_Ninf,omitempty"` - F_Nan *float32 `protobuf:"fixed32,17,opt,def=nan" json:"F_Nan,omitempty"` - Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Defaults) Reset() { *m = Defaults{} } -func (m *Defaults) String() string { return proto.CompactTextString(m) } -func (*Defaults) ProtoMessage() {} - -const Default_Defaults_F_Bool bool = true -const Default_Defaults_F_Int32 int32 = 32 -const Default_Defaults_F_Int64 int64 = 64 -const Default_Defaults_F_Fixed32 uint32 = 320 -const Default_Defaults_F_Fixed64 uint64 = 640 -const Default_Defaults_F_Uint32 uint32 = 3200 -const Default_Defaults_F_Uint64 uint64 = 6400 -const Default_Defaults_F_Float float32 = 314159 -const Default_Defaults_F_Double float64 = 271828 -const Default_Defaults_F_String string = "hello, \"world!\"\n" - -var Default_Defaults_F_Bytes []byte = []byte("Bignose") - -const Default_Defaults_F_Sint32 int32 = -32 -const Default_Defaults_F_Sint64 int64 = -64 -const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN - -var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) -var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) -var Default_Defaults_F_Nan float32 = float32(math.NaN()) - -func (m *Defaults) GetF_Bool() bool { - if m != nil && m.F_Bool != nil { - return *m.F_Bool - } - return Default_Defaults_F_Bool -} - -func (m *Defaults) GetF_Int32() int32 { - if m != nil && m.F_Int32 != nil { - return *m.F_Int32 - } - return Default_Defaults_F_Int32 -} - -func (m *Defaults) GetF_Int64() int64 { - if m != nil && m.F_Int64 != nil { - return *m.F_Int64 - } - return Default_Defaults_F_Int64 -} - -func (m *Defaults) GetF_Fixed32() uint32 { - if m != nil && m.F_Fixed32 != nil { - return *m.F_Fixed32 - } - return Default_Defaults_F_Fixed32 -} - -func (m *Defaults) GetF_Fixed64() uint64 { - if m != nil && m.F_Fixed64 != nil { - return *m.F_Fixed64 - } - return Default_Defaults_F_Fixed64 -} - -func (m *Defaults) GetF_Uint32() uint32 { - if m != nil && m.F_Uint32 != nil { - return *m.F_Uint32 - } - return Default_Defaults_F_Uint32 -} - -func (m *Defaults) GetF_Uint64() uint64 { - if m != nil && m.F_Uint64 != nil { - return *m.F_Uint64 - } - return Default_Defaults_F_Uint64 -} - -func (m *Defaults) GetF_Float() float32 { - if m != nil && m.F_Float != nil { - return *m.F_Float - } - return Default_Defaults_F_Float -} - -func (m *Defaults) GetF_Double() float64 { - if m != nil 
&& m.F_Double != nil { - return *m.F_Double - } - return Default_Defaults_F_Double -} - -func (m *Defaults) GetF_String() string { - if m != nil && m.F_String != nil { - return *m.F_String - } - return Default_Defaults_F_String -} - -func (m *Defaults) GetF_Bytes() []byte { - if m != nil && m.F_Bytes != nil { - return m.F_Bytes - } - return append([]byte(nil), Default_Defaults_F_Bytes...) -} - -func (m *Defaults) GetF_Sint32() int32 { - if m != nil && m.F_Sint32 != nil { - return *m.F_Sint32 - } - return Default_Defaults_F_Sint32 -} - -func (m *Defaults) GetF_Sint64() int64 { - if m != nil && m.F_Sint64 != nil { - return *m.F_Sint64 - } - return Default_Defaults_F_Sint64 -} - -func (m *Defaults) GetF_Enum() Defaults_Color { - if m != nil && m.F_Enum != nil { - return *m.F_Enum - } - return Default_Defaults_F_Enum -} - -func (m *Defaults) GetF_Pinf() float32 { - if m != nil && m.F_Pinf != nil { - return *m.F_Pinf - } - return Default_Defaults_F_Pinf -} - -func (m *Defaults) GetF_Ninf() float32 { - if m != nil && m.F_Ninf != nil { - return *m.F_Ninf - } - return Default_Defaults_F_Ninf -} - -func (m *Defaults) GetF_Nan() float32 { - if m != nil && m.F_Nan != nil { - return *m.F_Nan - } - return Default_Defaults_F_Nan -} - -func (m *Defaults) GetSub() *SubDefaults { - if m != nil { - return m.Sub - } - return nil -} - -type SubDefaults struct { - N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SubDefaults) Reset() { *m = SubDefaults{} } -func (m *SubDefaults) String() string { return proto.CompactTextString(m) } -func (*SubDefaults) ProtoMessage() {} - -const Default_SubDefaults_N int64 = 7 - -func (m *SubDefaults) GetN() int64 { - if m != nil && m.N != nil { - return *m.N - } - return Default_SubDefaults_N -} - -type RepeatedEnum struct { - Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } -func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } -func (*RepeatedEnum) ProtoMessage() {} - -func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { - if m != nil { - return m.Color - } - return nil -} - -type MoreRepeated struct { - Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` - BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed" json:"bools_packed,omitempty"` - Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` - IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed" json:"ints_packed,omitempty"` - Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } -func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } -func (*MoreRepeated) ProtoMessage() {} - -func (m *MoreRepeated) GetBools() []bool { - if m != nil { - return m.Bools - } - return nil -} - -func (m *MoreRepeated) GetBoolsPacked() []bool { - if m != nil { - return m.BoolsPacked - } - return nil -} - -func (m *MoreRepeated) GetInts() []int32 { - if m != nil { - return m.Ints - } - return nil -} - -func (m *MoreRepeated) GetIntsPacked() []int32 { - if m != nil { - return m.IntsPacked - } - return nil -} - -func (m *MoreRepeated) GetStrings() []string { - if m != nil { - return m.Strings - } - return nil -} - -type GroupOld struct { - G *GroupOld_G 
`protobuf:"group,1,opt" json:"g,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupOld) Reset() { *m = GroupOld{} } -func (m *GroupOld) String() string { return proto.CompactTextString(m) } -func (*GroupOld) ProtoMessage() {} - -func (m *GroupOld) GetG() *GroupOld_G { - if m != nil { - return m.G - } - return nil -} - -type GroupOld_G struct { - X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } - -func (m *GroupOld_G) GetX() int32 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -type GroupNew struct { - G *GroupNew_G `protobuf:"group,1,opt" json:"g,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupNew) Reset() { *m = GroupNew{} } -func (m *GroupNew) String() string { return proto.CompactTextString(m) } -func (*GroupNew) ProtoMessage() {} - -func (m *GroupNew) GetG() *GroupNew_G { - if m != nil { - return m.G - } - return nil -} - -type GroupNew_G struct { - X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` - Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } - -func (m *GroupNew_G) GetX() int32 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -func (m *GroupNew_G) GetY() int32 { - if m != nil && m.Y != nil { - return *m.Y - } - return 0 -} - -var E_Greeting = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: ([]string)(nil), - Field: 106, - Name: "testdata.greeting", - Tag: "bytes,106,rep,name=greeting", -} - -func init() { - proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value) - proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) - proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) - proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value) - proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value) - proto.RegisterExtension(E_Ext_More) - proto.RegisterExtension(E_Ext_Text) - proto.RegisterExtension(E_Ext_Number) - proto.RegisterExtension(E_Greeting) -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/test.proto b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/test.proto deleted file mode 100644 index 440dba38d..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/testdata/test.proto +++ /dev/null @@ -1,480 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// A feature-rich test file for the protocol compiler and libraries. - -syntax = "proto2"; - -package testdata; - -enum FOO { FOO1 = 1; }; - -message GoEnum { - required FOO foo = 1; -} - -message GoTestField { - required string Label = 1; - required string Type = 2; -} - -message GoTest { - // An enum, for completeness. - enum KIND { - VOID = 0; - - // Basic types - BOOL = 1; - BYTES = 2; - FINGERPRINT = 3; - FLOAT = 4; - INT = 5; - STRING = 6; - TIME = 7; - - // Groupings - TUPLE = 8; - ARRAY = 9; - MAP = 10; - - // Table types - TABLE = 11; - - // Functions - FUNCTION = 12; // last tag - }; - - // Some typical parameters - required KIND Kind = 1; - optional string Table = 2; - optional int32 Param = 3; - - // Required, repeated and optional foreign fields. - required GoTestField RequiredField = 4; - repeated GoTestField RepeatedField = 5; - optional GoTestField OptionalField = 6; - - // Required fields of all basic types - required bool F_Bool_required = 10; - required int32 F_Int32_required = 11; - required int64 F_Int64_required = 12; - required fixed32 F_Fixed32_required = 13; - required fixed64 F_Fixed64_required = 14; - required uint32 F_Uint32_required = 15; - required uint64 F_Uint64_required = 16; - required float F_Float_required = 17; - required double F_Double_required = 18; - required string F_String_required = 19; - required bytes F_Bytes_required = 101; - required sint32 F_Sint32_required = 102; - required sint64 F_Sint64_required = 103; - - // Repeated fields of all basic types - repeated bool F_Bool_repeated = 20; - repeated int32 F_Int32_repeated = 21; - repeated int64 F_Int64_repeated = 22; - repeated fixed32 F_Fixed32_repeated = 23; - repeated fixed64 F_Fixed64_repeated = 24; - repeated uint32 F_Uint32_repeated = 25; - repeated uint64 F_Uint64_repeated = 26; - repeated float F_Float_repeated = 27; - repeated double F_Double_repeated = 28; - repeated string F_String_repeated = 29; - repeated bytes F_Bytes_repeated = 201; - repeated sint32 F_Sint32_repeated = 202; - repeated sint64 F_Sint64_repeated = 203; - - // Optional fields of all basic types - optional bool F_Bool_optional = 30; - optional int32 F_Int32_optional = 31; - optional int64 F_Int64_optional = 32; - optional fixed32 F_Fixed32_optional = 33; - optional fixed64 F_Fixed64_optional = 34; - optional uint32 F_Uint32_optional = 35; - optional uint64 F_Uint64_optional = 36; - optional float F_Float_optional = 37; - optional double F_Double_optional = 38; - optional string F_String_optional = 39; - optional bytes F_Bytes_optional = 301; - optional sint32 F_Sint32_optional = 302; - optional sint64 F_Sint64_optional = 303; - - // Default-valued fields of all basic types - optional bool F_Bool_defaulted = 40 [default=true]; - 
optional int32 F_Int32_defaulted = 41 [default=32]; - optional int64 F_Int64_defaulted = 42 [default=64]; - optional fixed32 F_Fixed32_defaulted = 43 [default=320]; - optional fixed64 F_Fixed64_defaulted = 44 [default=640]; - optional uint32 F_Uint32_defaulted = 45 [default=3200]; - optional uint64 F_Uint64_defaulted = 46 [default=6400]; - optional float F_Float_defaulted = 47 [default=314159.]; - optional double F_Double_defaulted = 48 [default=271828.]; - optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"]; - optional bytes F_Bytes_defaulted = 401 [default="Bignose"]; - optional sint32 F_Sint32_defaulted = 402 [default = -32]; - optional sint64 F_Sint64_defaulted = 403 [default = -64]; - - // Packed repeated fields (no string or bytes). - repeated bool F_Bool_repeated_packed = 50 [packed=true]; - repeated int32 F_Int32_repeated_packed = 51 [packed=true]; - repeated int64 F_Int64_repeated_packed = 52 [packed=true]; - repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true]; - repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true]; - repeated uint32 F_Uint32_repeated_packed = 55 [packed=true]; - repeated uint64 F_Uint64_repeated_packed = 56 [packed=true]; - repeated float F_Float_repeated_packed = 57 [packed=true]; - repeated double F_Double_repeated_packed = 58 [packed=true]; - repeated sint32 F_Sint32_repeated_packed = 502 [packed=true]; - repeated sint64 F_Sint64_repeated_packed = 503 [packed=true]; - - // Required, repeated, and optional groups. - required group RequiredGroup = 70 { - required string RequiredField = 71; - }; - - repeated group RepeatedGroup = 80 { - required string RequiredField = 81; - }; - - optional group OptionalGroup = 90 { - required string RequiredField = 91; - }; -} - -// For testing skipping of unrecognized fields. -// Numbers are all big, larger than tag numbers in GoTestField, -// the message used in the corresponding test. -message GoSkipTest { - required int32 skip_int32 = 11; - required fixed32 skip_fixed32 = 12; - required fixed64 skip_fixed64 = 13; - required string skip_string = 14; - required group SkipGroup = 15 { - required int32 group_int32 = 16; - required string group_string = 17; - } -} - -// For testing packed/non-packed decoder switching. -// A serialized instance of one should be deserializable as the other. -message NonPackedTest { - repeated int32 a = 1; -} - -message PackedTest { - repeated int32 b = 1 [packed=true]; -} - -message MaxTag { - // Maximum possible tag number. - optional string last_field = 536870911; -} - -message OldMessage { - message Nested { - optional string name = 1; - } - optional Nested nested = 1; - - optional int32 num = 2; -} - -// NewMessage is wire compatible with OldMessage; -// imagine it as a future version. -message NewMessage { - message Nested { - optional string name = 1; - optional string food_group = 2; - } - optional Nested nested = 1; - - // This is an int32 in OldMessage. - optional int64 num = 2; -} - -// Smaller tests for ASCII formatting. 
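NonPackedTest/PackedTest and the OldMessage/NewMessage pair above pin down two wire-compatibility guarantees: a decoder must accept a repeated scalar field in either packed or unpacked encoding, and widening num from int32 to int64 keeps old payloads decodable because both use varints. A round-trip sketch of the first guarantee, under the same import-path assumption as the earlier example:

package main

import (
	"fmt"
	"log"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/proto/testdata"
)

func main() {
	// Marshal with the packed encoding of field 1...
	buf, err := proto.Marshal(&testdata.PackedTest{B: []int32{3, 270, 86942}})
	if err != nil {
		log.Fatal(err)
	}

	// ...and decode it through the non-packed message: same field
	// number, different wire type, and the decoder still accepts it.
	var npt testdata.NonPackedTest
	if err := proto.Unmarshal(buf, &npt); err != nil {
		log.Fatal(err)
	}
	fmt.Println(npt.GetA()) // [3 270 86942]
}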
- -message InnerMessage { - required string host = 1; - optional int32 port = 2 [default=4000]; - optional bool connected = 3; -} - -message OtherMessage { - optional int64 key = 1; - optional bytes value = 2; - optional float weight = 3; - optional InnerMessage inner = 4; -} - -message MyMessage { - required int32 count = 1; - optional string name = 2; - optional string quote = 3; - repeated string pet = 4; - optional InnerMessage inner = 5; - repeated OtherMessage others = 6; - repeated InnerMessage rep_inner = 12; - - enum Color { - RED = 0; - GREEN = 1; - BLUE = 2; - }; - optional Color bikeshed = 7; - - optional group SomeGroup = 8 { - optional int32 group_field = 9; - } - - // This field becomes [][]byte in the generated code. - repeated bytes rep_bytes = 10; - - optional double bigfloat = 11; - - extensions 100 to max; -} - -message Ext { - extend MyMessage { - optional Ext more = 103; - optional string text = 104; - optional int32 number = 105; - } - - optional string data = 1; -} - -extend MyMessage { - repeated string greeting = 106; -} - -message DefaultsMessage { - enum DefaultsEnum { - ZERO = 0; - ONE = 1; - TWO = 2; - }; - extensions 100 to max; -} - -extend DefaultsMessage { - optional double no_default_double = 101; - optional float no_default_float = 102; - optional int32 no_default_int32 = 103; - optional int64 no_default_int64 = 104; - optional uint32 no_default_uint32 = 105; - optional uint64 no_default_uint64 = 106; - optional sint32 no_default_sint32 = 107; - optional sint64 no_default_sint64 = 108; - optional fixed32 no_default_fixed32 = 109; - optional fixed64 no_default_fixed64 = 110; - optional sfixed32 no_default_sfixed32 = 111; - optional sfixed64 no_default_sfixed64 = 112; - optional bool no_default_bool = 113; - optional string no_default_string = 114; - optional bytes no_default_bytes = 115; - optional DefaultsMessage.DefaultsEnum no_default_enum = 116; - - optional double default_double = 201 [default = 3.1415]; - optional float default_float = 202 [default = 3.14]; - optional int32 default_int32 = 203 [default = 42]; - optional int64 default_int64 = 204 [default = 43]; - optional uint32 default_uint32 = 205 [default = 44]; - optional uint64 default_uint64 = 206 [default = 45]; - optional sint32 default_sint32 = 207 [default = 46]; - optional sint64 default_sint64 = 208 [default = 47]; - optional fixed32 default_fixed32 = 209 [default = 48]; - optional fixed64 default_fixed64 = 210 [default = 49]; - optional sfixed32 default_sfixed32 = 211 [default = 50]; - optional sfixed64 default_sfixed64 = 212 [default = 51]; - optional bool default_bool = 213 [default = true]; - optional string default_string = 214 [default = "Hello, string"]; - optional bytes default_bytes = 215 [default = "Hello, bytes"]; - optional DefaultsMessage.DefaultsEnum default_enum = 216 [default = ONE]; -} - -message MyMessageSet { - option message_set_wire_format = true; - extensions 100 to max; -} - -message Empty { -} - -extend MyMessageSet { - optional Empty x201 = 201; - optional Empty x202 = 202; - optional Empty x203 = 203; - optional Empty x204 = 204; - optional Empty x205 = 205; - optional Empty x206 = 206; - optional Empty x207 = 207; - optional Empty x208 = 208; - optional Empty x209 = 209; - optional Empty x210 = 210; - optional Empty x211 = 211; - optional Empty x212 = 212; - optional Empty x213 = 213; - optional Empty x214 = 214; - optional Empty x215 = 215; - optional Empty x216 = 216; - optional Empty x217 = 217; - optional Empty x218 = 218; - optional Empty x219 = 219; - 
optional Empty x220 = 220; - optional Empty x221 = 221; - optional Empty x222 = 222; - optional Empty x223 = 223; - optional Empty x224 = 224; - optional Empty x225 = 225; - optional Empty x226 = 226; - optional Empty x227 = 227; - optional Empty x228 = 228; - optional Empty x229 = 229; - optional Empty x230 = 230; - optional Empty x231 = 231; - optional Empty x232 = 232; - optional Empty x233 = 233; - optional Empty x234 = 234; - optional Empty x235 = 235; - optional Empty x236 = 236; - optional Empty x237 = 237; - optional Empty x238 = 238; - optional Empty x239 = 239; - optional Empty x240 = 240; - optional Empty x241 = 241; - optional Empty x242 = 242; - optional Empty x243 = 243; - optional Empty x244 = 244; - optional Empty x245 = 245; - optional Empty x246 = 246; - optional Empty x247 = 247; - optional Empty x248 = 248; - optional Empty x249 = 249; - optional Empty x250 = 250; -} - -message MessageList { - repeated group Message = 1 { - required string name = 2; - required int32 count = 3; - } -} - -message Strings { - optional string string_field = 1; - optional bytes bytes_field = 2; -} - -message Defaults { - enum Color { - RED = 0; - GREEN = 1; - BLUE = 2; - } - - // Default-valued fields of all basic types. - // Same as GoTest, but copied here to make testing easier. - optional bool F_Bool = 1 [default=true]; - optional int32 F_Int32 = 2 [default=32]; - optional int64 F_Int64 = 3 [default=64]; - optional fixed32 F_Fixed32 = 4 [default=320]; - optional fixed64 F_Fixed64 = 5 [default=640]; - optional uint32 F_Uint32 = 6 [default=3200]; - optional uint64 F_Uint64 = 7 [default=6400]; - optional float F_Float = 8 [default=314159.]; - optional double F_Double = 9 [default=271828.]; - optional string F_String = 10 [default="hello, \"world!\"\n"]; - optional bytes F_Bytes = 11 [default="Bignose"]; - optional sint32 F_Sint32 = 12 [default=-32]; - optional sint64 F_Sint64 = 13 [default=-64]; - optional Color F_Enum = 14 [default=GREEN]; - - // More fields with crazy defaults. - optional float F_Pinf = 15 [default=inf]; - optional float F_Ninf = 16 [default=-inf]; - optional float F_Nan = 17 [default=nan]; - - // Sub-message. - optional SubDefaults sub = 18; - - // Redundant but explicit defaults. - optional string str_zero = 19 [default=""]; -} - -message SubDefaults { - optional int64 n = 1 [default=7]; -} - -message RepeatedEnum { - enum Color { - RED = 1; - } - repeated Color color = 1; -} - -message MoreRepeated { - repeated bool bools = 1; - repeated bool bools_packed = 2 [packed=true]; - repeated int32 ints = 3; - repeated int32 ints_packed = 4 [packed=true]; - repeated int64 int64s_packed = 7 [packed=true]; - repeated string strings = 5; - repeated fixed32 fixeds = 6; -} - -// GroupOld and GroupNew have the same wire format. -// GroupNew has a new field inside a group. - -message GroupOld { - optional group G = 101 { - optional int32 x = 2; - } -} - -message GroupNew { - optional group G = 101 { - optional int32 x = 2; - optional int32 y = 3; - } -} - -message FloatingPoint { - required double f = 1; -} - -message MessageWithMap { - map<int32, string> name_mapping = 1; - map<sint64, FloatingPoint> msg_mapping = 2; - map<bool, bytes> byte_mapping = 3; - map<string, string> str_to_str = 4; -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text.go deleted file mode 100644 index 365242441..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text.go +++ /dev/null @@ -1,804 +0,0 @@ -// Extensions for Protocol Buffers to create more go like structures.
-// -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. - -import ( - "bufio" - "bytes" - "encoding" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - gtNewline = []byte(">\n") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. 
- return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Printf("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -var ( - messageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem() -) - -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - -func writeStruct(w *textWriter, sv reflect.Value) error { - if sv.Type() == messageSetType { - return writeMessageSet(w, sv.Addr().Interface().(*MessageSet)) - } - - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. 
- if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if len(props.Enum) > 0 { - if err := writeEnum(w, v, props); err != nil { - return err - } - } else if err := writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. - keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := writeAny(w, key, props.mkeyprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := writeAny(w, val, props.mvalprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } - - if len(props.Enum) > 0 { - if err := writeEnum(w, fv, props); err != nil { - return err - } - } else if err := writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if pv.Type().Implements(extendableProtoType) { - if err := writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeRaw writes an uninterpreted raw message. -func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - -// writeAny writes an arbitrary field. 
-func writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - if props != nil && len(props.CustomType) > 0 { - var custom Marshaler = v.Interface().(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - if err := writeString(w, string(data)); err != nil { - return err - } - return nil - } - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if tm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := tm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else if err := writeStruct(w, v); err != nil { - return err - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. - switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeMessageSet(w *textWriter, ms *MessageSet) error { - for _, item := range ms.Item { - id := *item.TypeId - if msd, ok := messageSetMap[id]; ok { - // Known message set type. 
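
The writeString helper above is deliberately byte-oriented: it loops over bytes rather than runes, leaves apostrophes alone, and emits three-digit octal escapes for anything non-printable, so output stays interoperable with the C++ and Java text-format implementations. A standalone sketch of those same rules (escapeText is a hypothetical name, stdlib only):

package main

import (
	"bytes"
	"fmt"
)

// escapeText mirrors the writeString logic above: byte-oriented,
// octal escapes for non-printable bytes, no escaping of apostrophes.
func escapeText(s string) string {
	var b bytes.Buffer
	b.WriteByte('"')
	for i := 0; i < len(s); i++ {
		switch c := s[i]; c {
		case '\n':
			b.WriteString(`\n`)
		case '\r':
			b.WriteString(`\r`)
		case '\t':
			b.WriteString(`\t`)
		case '"':
			b.WriteString(`\"`)
		case '\\':
			b.WriteString(`\\`)
		default:
			if c >= 0x20 && c < 0x7f { // isprint, as defined above
				b.WriteByte(c)
			} else {
				fmt.Fprintf(&b, "\\%03o", c)
			}
		}
	}
	b.WriteByte('"')
	return b.String()
}

func main() {
	// Multi-byte UTF-8 comes out as one octal escape per byte,
	// e.g. "谷歌" -> \350\260\267\346\255\214, as in the tests below.
	fmt.Println(escapeText("谷歌\x01"))
}
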
- if _, err := fmt.Fprintf(w, "[%s]: <\n", msd.name); err != nil { - return err - } - w.indent() - - pb := reflect.New(msd.t.Elem()) - if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil { - if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil { - return err - } - } else { - if err := writeStruct(w, pb.Elem()); err != nil { - return err - } - } - } else { - // Unknown type. - if _, err := fmt.Fprintf(w, "[%d]: <\n", id); err != nil { - return err - } - w.indent() - if err := writeUnknownStruct(w, item.Message); err != nil { - return err - } - } - w.unindent() - if _, err := w.Write(gtNewline); err != nil { - return err - } - } - return nil -} - -func writeUnknownStruct(w *textWriter, data []byte) error { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep := pv.Interface().(extendableProto) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - var m map[int32]Extension - if em, ok := ep.(extensionsMap); ok { - m = em.ExtensionMap() - } else if em, ok := ep.(extensionsBytes); ok { - eb := em.GetExtensions() - var err error - m, err = BytesToExtensionsMap(*eb) - if err != nil { - return err - } - } - - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. 
- if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. - if !desc.repeated() { - if err := writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -func marshalText(w io.Writer, pb Message, compact bool) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: compact, - } - - if tm, ok := pb.(encoding.TextMarshaler); ok { - text, err := tm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { - return marshalText(w, pb, false) -} - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { - var buf bytes.Buffer - marshalText(&buf, pb, false) - return buf.String() -} - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { - var buf bytes.Buffer - marshalText(&buf, pb, true) - return buf.String() -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_gogo.go deleted file mode 100644 index cdb23373c..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_gogo.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
-	"fmt"
-	"reflect"
-)
-
-func writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
-	m, ok := enumStringMaps[props.Enum]
-	if !ok {
-		// No string map registered for this enum: write the numeric form
-		// and stop, rather than falling through and writing it again below.
-		if err := writeAny(w, v, props); err != nil {
-			return err
-		}
-		return nil
-	}
-	key := int32(0)
-	if v.Kind() == reflect.Ptr {
-		key = int32(v.Elem().Int())
-	} else {
-		key = int32(v.Int())
-	}
-	s, ok := m[key]
-	if !ok {
-		// Unknown enum value: fall back to the numeric form.
-		if err := writeAny(w, v, props); err != nil {
-			return err
-		}
-		return nil
-	}
-	_, err := fmt.Fprint(w, s)
-	return err
-}
diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_parser.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_parser.go
deleted file mode 100644
index 9b2fab593..000000000
--- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_parser.go
+++ /dev/null
@@ -1,815 +0,0 @@
-// Extensions for Protocol Buffers to create more go like structures.
-//
-// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
-// http://github.com/gogo/protobuf/gogoproto
-//
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. - -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - 
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - base := 8 - ss := s[:2] - s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) - if err != nil { - return "", "", err - } - return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b - } - s = s[n:] - return string(bs), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && p.cur.value[0] == '"' { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || p.s[0] != '"' { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(st reflect.Type, name string) (int, *Properties, bool) { - sprops := GetProperties(st) - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - reqCount := GetProperties(st).reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]". - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - tok = p.next() - if tok.err != nil { - return tok.err - } - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. 
- for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == tok.value { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", tok.value) - } - // Check the extension terminator. - tok = p.next() - if tok.err != nil { - return tok.err - } - if tok.value != "]" { - return p.errorf("unrecognized extension terminator %q", tok.value) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(extendableProto) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - } else { - // This is a normal, non-extension field. - name := tok.value - fi, props, ok := structFieldByName(st, name) - if !ok { - return p.errorf("unknown field name %q in %v", name, st) - } - - dst := sv.Field(fi) - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // Technically the "key" and "value" could come in any order, - // but in practice they won't. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - if err := p.consumeToken("key"); err != nil { - return err - } - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.mkeyprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - if err := p.consumeToken("value"); err != nil { - return err - } - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.mvalprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - if err := p.consumeToken(terminator); err != nil { - return err - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, st.Field(fi).Type); err != nil { - return err - } - - // Parse into the field. 
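
The map-entry grammar handled just above — each entry arriving as a `< key: K value: V >` message, with optional separators — can be seen end to end at the API level. A usage sketch, assuming the vendored package and its testdata MessageWithMap type:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	pb "github.com/gogo/protobuf/proto/testdata"
)

func main() {
	m := new(pb.MessageWithMap)
	// Each map entry parses as a nested <key:... value:...> message.
	if err := proto.UnmarshalText(`name_mapping:<key:1 value:"Beatles">`, m); err != nil {
		panic(err)
	}
	fmt.Println(m.NameMapping[1]) // Beatles
}
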
- fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } else if props.Required { - reqCount-- - } - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. -func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - if len(props.CustomType) > 0 { - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - tc := reflect.TypeOf(new(Marshaler)) - ok := t.Elem().Implements(tc.Elem()) - if ok { - fv := v - flen := fv.Len() - if flen == fv.Cap() { - nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1) - reflect.Copy(nav, fv) - fv.Set(nav) - } - fv.SetLen(flen + 1) - - // Read one. - p.back() - return p.readAny(fv.Index(flen), props) - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler) - err := custom.Unmarshal([]byte(tok.unquoted)) - if err != nil { - return p.errorf("%v %v: %v", err, v.Type(), tok.value) - } - v.Set(reflect.ValueOf(custom)) - } else { - custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler) - err := custom.Unmarshal([]byte(tok.unquoted)) - if err != nil { - return p.errorf("%v %v: %v", err, v.Type(), tok.value) - } - v.Set(reflect.Indirect(reflect.ValueOf(custom))) - } - return nil - } - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. May already exist. - flen := fv.Len() - if flen == fv.Cap() { - nav := reflect.MakeSlice(at, flen, 2*flen+1) - reflect.Copy(nav, fv) - fv.Set(nav) - } - fv.SetLen(flen + 1) - - // Read one. - p.back() - return p.readAny(fv.Index(flen), props) - case reflect.Bool: - // Either "true", "false", 1 or 0. - switch tok.value { - case "true", "1": - fv.SetBool(true) - return nil - case "false", "0": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". 
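
A stdlib-only sketch of the suffix rule described in the comment above — drop a trailing 'f' (C++ emits "17.0f") but leave the literals "inf" and "-inf" intact before handing the value to strconv.ParseFloat:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	for _, v := range []string{"17.0f", "inf", "-4.5"} {
		s := v
		if strings.HasSuffix(s, "f") && s != "inf" && s != "-inf" {
			s = s[:len(s)-1] // drop the C++-style float suffix
		}
		f, err := strconv.ParseFloat(s, 32)
		fmt.Println(v, "->", f, err) // 17.0f -> 17, inf -> +Inf, -4.5 -> -4.5
	}
}
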
- if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. -func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err - } - pb.Reset() - v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_parser_test.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_parser_test.go deleted file mode 100644 index f1c623192..000000000 --- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_parser_test.go +++ /dev/null @@ -1,511 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "math" - "reflect" - "testing" - - . "github.com/gogo/protobuf/proto" - proto3pb "github.com/gogo/protobuf/proto/proto3_proto" - . "github.com/gogo/protobuf/proto/testdata" -) - -type UnmarshalTextTest struct { - in string - err string // if "", no error expected - out *MyMessage -} - -func buildExtStructTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - SetExtension(msg, E_Ext_More, &Ext{ - Data: String("Hello, world!"), - }) - return UnmarshalTextTest{in: text, out: msg} -} - -func buildExtDataTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - SetExtension(msg, E_Ext_Text, String("Hello, world!")) - SetExtension(msg, E_Ext_Number, Int32(1729)) - return UnmarshalTextTest{in: text, out: msg} -} - -func buildExtRepStringTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil { - panic(err) - } - return UnmarshalTextTest{in: text, out: msg} -} - -var unMarshalTextTests = []UnmarshalTextTest{ - // Basic - { - in: " count:42\n name:\"Dave\" ", - out: &MyMessage{ - Count: Int32(42), - Name: String("Dave"), - }, - }, - - // Empty quoted string - { - in: `count:42 name:""`, - out: &MyMessage{ - Count: Int32(42), - Name: String(""), - }, - }, - - // Quoted string concatenation - { - in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`, - out: &MyMessage{ - Count: Int32(42), - Name: String("My name is elsewhere"), - }, - }, - - // Quoted string with escaped apostrophe - { - in: `count:42 name: "HOLIDAY - New Year\'s Day"`, - out: &MyMessage{ - Count: Int32(42), - Name: String("HOLIDAY - New Year's Day"), - }, - }, - - // Quoted string with single quote - { - in: `count:42 name: 'Roger "The Ramster" Ramjet'`, - out: &MyMessage{ - Count: Int32(42), - Name: String(`Roger "The Ramster" Ramjet`), - }, - }, - - // Quoted string with all the accepted special characters from the C++ test - { - in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"", - out: &MyMessage{ - Count: Int32(42), - Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"), - }, - }, - - // Quoted string with quoted backslash - { - in: `count:42 name: "\\'xyz"`, - out: &MyMessage{ - Count: Int32(42), - Name: String(`\'xyz`), - }, - }, - - // Quoted string with UTF-8 bytes. 
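
The `line L.O` error strings these tests match against come from ParseError's Error method earlier in text_parser.go (1-based line, 0-based offset, offset shown only on line 1). Assuming the vendored packages, a quick way to see one:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	pb "github.com/gogo/protobuf/proto/testdata"
)

func main() {
	m := new(pb.MyMessage)
	// A missing colon on a scalar field is a parse error with position info.
	err := proto.UnmarshalText(`count 42`, m)
	fmt.Println(err) // line 1.6: expected ':', found "42"
}
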
- { - in: "count:42 name: '\303\277\302\201\xAB'", - out: &MyMessage{ - Count: Int32(42), - Name: String("\303\277\302\201\xAB"), - }, - }, - - // Bad quoted string - { - in: `inner: < host: "\0" >` + "\n", - err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`, - }, - - // Number too large for int64 - { - in: "count: 1 others { key: 123456789012345678901 }", - err: "line 1.23: invalid int64: 123456789012345678901", - }, - - // Number too large for int32 - { - in: "count: 1234567890123", - err: "line 1.7: invalid int32: 1234567890123", - }, - - // Number in hexadecimal - { - in: "count: 0x2beef", - out: &MyMessage{ - Count: Int32(0x2beef), - }, - }, - - // Number in octal - { - in: "count: 024601", - out: &MyMessage{ - Count: Int32(024601), - }, - }, - - // Floating point number with "f" suffix - { - in: "count: 4 others:< weight: 17.0f >", - out: &MyMessage{ - Count: Int32(4), - Others: []*OtherMessage{ - { - Weight: Float32(17), - }, - }, - }, - }, - - // Floating point positive infinity - { - in: "count: 4 bigfloat: inf", - out: &MyMessage{ - Count: Int32(4), - Bigfloat: Float64(math.Inf(1)), - }, - }, - - // Floating point negative infinity - { - in: "count: 4 bigfloat: -inf", - out: &MyMessage{ - Count: Int32(4), - Bigfloat: Float64(math.Inf(-1)), - }, - }, - - // Number too large for float32 - { - in: "others:< weight: 12345678901234567890123456789012345678901234567890 >", - err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890", - }, - - // Number posing as a quoted string - { - in: `inner: < host: 12 >` + "\n", - err: `line 1.15: invalid string: 12`, - }, - - // Quoted string posing as int32 - { - in: `count: "12"`, - err: `line 1.7: invalid int32: "12"`, - }, - - // Quoted string posing a float32 - { - in: `others:< weight: "17.4" >`, - err: `line 1.17: invalid float32: "17.4"`, - }, - - // Enum - { - in: `count:42 bikeshed: BLUE`, - out: &MyMessage{ - Count: Int32(42), - Bikeshed: MyMessage_BLUE.Enum(), - }, - }, - - // Repeated field - { - in: `count:42 pet: "horsey" pet:"bunny"`, - out: &MyMessage{ - Count: Int32(42), - Pet: []string{"horsey", "bunny"}, - }, - }, - - // Repeated message with/without colon and <>/{} - { - in: `count:42 others:{} others{} others:<> others:{}`, - out: &MyMessage{ - Count: Int32(42), - Others: []*OtherMessage{ - {}, - {}, - {}, - {}, - }, - }, - }, - - // Missing colon for inner message - { - in: `count:42 inner < host: "cauchy.syd" >`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("cauchy.syd"), - }, - }, - }, - - // Missing colon for string field - { - in: `name "Dave"`, - err: `line 1.5: expected ':', found "\"Dave\""`, - }, - - // Missing colon for int32 field - { - in: `count 42`, - err: `line 1.6: expected ':', found "42"`, - }, - - // Missing required field - { - in: `name: "Pawel"`, - err: `proto: required field "testdata.MyMessage.count" not set`, - out: &MyMessage{ - Name: String("Pawel"), - }, - }, - - // Repeated non-repeated field - { - in: `name: "Rob" name: "Russ"`, - err: `line 1.12: non-repeated field "name" was repeated`, - }, - - // Group - { - in: `count: 17 SomeGroup { group_field: 12 }`, - out: &MyMessage{ - Count: Int32(17), - Somegroup: &MyMessage_SomeGroup{ - GroupField: Int32(12), - }, - }, - }, - - // Semicolon between fields - { - in: `count:3;name:"Calvin"`, - out: &MyMessage{ - Count: Int32(3), - Name: String("Calvin"), - }, - }, - // Comma between fields - { - in: `count:4,name:"Ezekiel"`, - out: &MyMessage{ - Count: Int32(4), - 
Name: String("Ezekiel"), - }, - }, - - // Extension - buildExtStructTest(`count: 42 [testdata.Ext.more]:`), - buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`), - buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`), - buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`), - - // Big all-in-one - { - in: "count:42 # Meaning\n" + - `name:"Dave" ` + - `quote:"\"I didn't want to go.\"" ` + - `pet:"bunny" ` + - `pet:"kitty" ` + - `pet:"horsey" ` + - `inner:<` + - ` host:"footrest.syd" ` + - ` port:7001 ` + - ` connected:true ` + - `> ` + - `others:<` + - ` key:3735928559 ` + - ` value:"\x01A\a\f" ` + - `> ` + - `others:<` + - " weight:58.9 # Atomic weight of Co\n" + - ` inner:<` + - ` host:"lesha.mtv" ` + - ` port:8002 ` + - ` >` + - `>`, - out: &MyMessage{ - Count: Int32(42), - Name: String("Dave"), - Quote: String(`"I didn't want to go."`), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &InnerMessage{ - Host: String("footrest.syd"), - Port: Int32(7001), - Connected: Bool(true), - }, - Others: []*OtherMessage{ - { - Key: Int64(3735928559), - Value: []byte{0x1, 'A', '\a', '\f'}, - }, - { - Weight: Float32(58.9), - Inner: &InnerMessage{ - Host: String("lesha.mtv"), - Port: Int32(8002), - }, - }, - }, - }, - }, -} - -func TestUnmarshalText(t *testing.T) { - for i, test := range unMarshalTextTests { - pb := new(MyMessage) - err := UnmarshalText(test.in, pb) - if test.err == "" { - // We don't expect failure. - if err != nil { - t.Errorf("Test %d: Unexpected error: %v", i, err) - } else if !reflect.DeepEqual(pb, test.out) { - t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", - i, pb, test.out) - } - } else { - // We do expect failure. - if err == nil { - t.Errorf("Test %d: Didn't get expected error: %v", i, test.err) - } else if err.Error() != test.err { - t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v", - i, err.Error(), test.err) - } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) { - t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", - i, pb, test.out) - } - } - } -} - -func TestUnmarshalTextCustomMessage(t *testing.T) { - msg := &textMessage{} - if err := UnmarshalText("custom", msg); err != nil { - t.Errorf("Unexpected error from custom unmarshal: %v", err) - } - if UnmarshalText("not custom", msg) == nil { - t.Errorf("Didn't get expected error from custom unmarshal") - } -} - -// Regression test; this caused a panic. 
-func TestRepeatedEnum(t *testing.T) {
-	pb := new(RepeatedEnum)
-	if err := UnmarshalText("color: RED", pb); err != nil {
-		t.Fatal(err)
-	}
-	exp := &RepeatedEnum{
-		Color: []RepeatedEnum_Color{RepeatedEnum_RED},
-	}
-	if !Equal(pb, exp) {
-		t.Errorf("Incorrectly populated.\nHave: %v\nWant: %v", pb, exp)
-	}
-}
-
-func TestProto3TextParsing(t *testing.T) {
-	m := new(proto3pb.Message)
-	const in = `name: "Wallace" true_scotsman: true`
-	want := &proto3pb.Message{
-		Name:         "Wallace",
-		TrueScotsman: true,
-	}
-	if err := UnmarshalText(in, m); err != nil {
-		t.Fatal(err)
-	}
-	if !Equal(m, want) {
-		t.Errorf("\n got %v\nwant %v", m, want)
-	}
-}
-
-func TestMapParsing(t *testing.T) {
-	m := new(MessageWithMap)
-	const in = `name_mapping:<key:1234 value:"Feist"> name_mapping:<key:1 value:"Beatles">` +
-		`msg_mapping:<key:-4, value:<f: 2.0>,>` + // separating commas are okay
-		`msg_mapping<key:-2 value<f: 4.0>>` + // no colon after "value"
-		`byte_mapping:<key:true value:"so be it">`
-	want := &MessageWithMap{
-		NameMapping: map[int32]string{
-			1:    "Beatles",
-			1234: "Feist",
-		},
-		MsgMapping: map[int64]*FloatingPoint{
-			-4: {F: Float64(2.0)},
-			-2: {F: Float64(4.0)},
-		},
-		ByteMapping: map[bool][]byte{
-			true: []byte("so be it"),
-		},
-	}
-	if err := UnmarshalText(in, m); err != nil {
-		t.Fatal(err)
-	}
-	if !Equal(m, want) {
-		t.Errorf("\n got %v\nwant %v", m, want)
-	}
-}
-
-var benchInput string
-
-func init() {
-	benchInput = "count: 4\n"
-	for i := 0; i < 1000; i++ {
-		benchInput += "pet: \"fido\"\n"
-	}
-
-	// Check it is valid input.
-	pb := new(MyMessage)
-	err := UnmarshalText(benchInput, pb)
-	if err != nil {
-		panic("Bad benchmark input: " + err.Error())
-	}
-}
-
-func BenchmarkUnmarshalText(b *testing.B) {
-	pb := new(MyMessage)
-	for i := 0; i < b.N; i++ {
-		UnmarshalText(benchInput, pb)
-	}
-	b.SetBytes(int64(len(benchInput)))
-}
diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_test.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_test.go
deleted file mode 100644
index a98504ef3..000000000
--- a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_test.go
+++ /dev/null
@@ -1,450 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "bytes" - "errors" - "io/ioutil" - "math" - "strings" - "testing" - - "github.com/gogo/protobuf/proto" - - proto3pb "github.com/gogo/protobuf/proto/proto3_proto" - pb "github.com/gogo/protobuf/proto/testdata" -) - -// textMessage implements the methods that allow it to marshal and unmarshal -// itself as text. -type textMessage struct { -} - -func (*textMessage) MarshalText() ([]byte, error) { - return []byte("custom"), nil -} - -func (*textMessage) UnmarshalText(bytes []byte) error { - if string(bytes) != "custom" { - return errors.New("expected 'custom'") - } - return nil -} - -func (*textMessage) Reset() {} -func (*textMessage) String() string { return "" } -func (*textMessage) ProtoMessage() {} - -func newTestMessage() *pb.MyMessage { - msg := &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - Quote: proto.String(`"I didn't want to go."`), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &pb.InnerMessage{ - Host: proto.String("footrest.syd"), - Port: proto.Int32(7001), - Connected: proto.Bool(true), - }, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(0xdeadbeef), - Value: []byte{1, 65, 7, 12}, - }, - { - Weight: proto.Float32(6.022), - Inner: &pb.InnerMessage{ - Host: proto.String("lesha.mtv"), - Port: proto.Int32(8002), - }, - }, - }, - Bikeshed: pb.MyMessage_BLUE.Enum(), - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(8), - }, - // One normally wouldn't do this. - // This is an undeclared tag 13, as a varint (wire type 0) with value 4. - XXX_unrecognized: []byte{13<<3 | 0, 4}, - } - ext := &pb.Ext{ - Data: proto.String("Big gobs for big rats"), - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil { - panic(err) - } - greetings := []string{"adg", "easy", "cow"} - if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil { - panic(err) - } - - // Add an unknown extension. We marshal a pb.Ext, and fake the ID. - b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")}) - if err != nil { - panic(err) - } - b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...) - proto.SetRawExtension(msg, 201, b) - - // Extensions can be plain fields, too, so let's test that. 
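
marshalText (shown earlier) short-circuits for any message implementing encoding.TextMarshaler, which is exactly what the textMessage fixture above exercises. A hedged, self-contained variant — miniText is a hypothetical type, not part of the package:

package main

import (
	"bytes"
	"fmt"

	"github.com/gogo/protobuf/proto"
)

// miniText satisfies proto.Message and encoding.TextMarshaler, so
// proto.MarshalText defers to it instead of reflecting over fields.
type miniText struct{}

func (*miniText) Reset()         {}
func (*miniText) String() string { return "" }
func (*miniText) ProtoMessage()  {}

func (*miniText) MarshalText() ([]byte, error) { return []byte("custom"), nil }

func main() {
	var buf bytes.Buffer
	if err := proto.MarshalText(&buf, &miniText{}); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // custom
}
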
- b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19) - proto.SetRawExtension(msg, 202, b) - - return msg -} - -const text = `count: 42 -name: "Dave" -quote: "\"I didn't want to go.\"" -pet: "bunny" -pet: "kitty" -pet: "horsey" -inner: < - host: "footrest.syd" - port: 7001 - connected: true -> -others: < - key: 3735928559 - value: "\001A\007\014" -> -others: < - weight: 6.022 - inner: < - host: "lesha.mtv" - port: 8002 - > -> -bikeshed: BLUE -SomeGroup { - group_field: 8 -} -/* 2 unknown bytes */ -13: 4 -[testdata.Ext.more]: < - data: "Big gobs for big rats" -> -[testdata.greeting]: "adg" -[testdata.greeting]: "easy" -[testdata.greeting]: "cow" -/* 13 unknown bytes */ -201: "\t3G skiing" -/* 3 unknown bytes */ -202: 19 -` - -func TestMarshalText(t *testing.T) { - buf := new(bytes.Buffer) - if err := proto.MarshalText(buf, newTestMessage()); err != nil { - t.Fatalf("proto.MarshalText: %v", err) - } - s := buf.String() - if s != text { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text) - } -} - -func TestMarshalTextCustomMessage(t *testing.T) { - buf := new(bytes.Buffer) - if err := proto.MarshalText(buf, &textMessage{}); err != nil { - t.Fatalf("proto.MarshalText: %v", err) - } - s := buf.String() - if s != "custom" { - t.Errorf("Got %q, expected %q", s, "custom") - } -} -func TestMarshalTextNil(t *testing.T) { - want := "" - tests := []proto.Message{nil, (*pb.MyMessage)(nil)} - for i, test := range tests { - buf := new(bytes.Buffer) - if err := proto.MarshalText(buf, test); err != nil { - t.Fatal(err) - } - if got := buf.String(); got != want { - t.Errorf("%d: got %q want %q", i, got, want) - } - } -} - -func TestMarshalTextUnknownEnum(t *testing.T) { - // The Color enum only specifies values 0-2. - m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()} - got := m.String() - const want = `bikeshed:3 ` - if got != want { - t.Errorf("\n got %q\nwant %q", got, want) - } -} - -func BenchmarkMarshalTextBuffered(b *testing.B) { - buf := new(bytes.Buffer) - m := newTestMessage() - for i := 0; i < b.N; i++ { - buf.Reset() - proto.MarshalText(buf, m) - } -} - -func BenchmarkMarshalTextUnbuffered(b *testing.B) { - w := ioutil.Discard - m := newTestMessage() - for i := 0; i < b.N; i++ { - proto.MarshalText(w, m) - } -} - -func compact(src string) string { - // s/[ \n]+/ /g; s/ $//; - dst := make([]byte, len(src)) - space, comment := false, false - j := 0 - for i := 0; i < len(src); i++ { - if strings.HasPrefix(src[i:], "/*") { - comment = true - i++ - continue - } - if comment && strings.HasPrefix(src[i:], "*/") { - comment = false - i++ - continue - } - if comment { - continue - } - c := src[i] - if c == ' ' || c == '\n' { - space = true - continue - } - if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') { - space = false - } - if c == '{' { - space = false - } - if space { - dst[j] = ' ' - j++ - space = false - } - dst[j] = c - j++ - } - if space { - dst[j] = ' ' - j++ - } - return string(dst[0:j]) -} - -var compactText = compact(text) - -func TestCompactText(t *testing.T) { - s := proto.CompactTextString(newTestMessage()) - if s != compactText { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText) - } -} - -func TestStringEscaping(t *testing.T) { - testCases := []struct { - in *pb.Strings - out string - }{ - { - // Test data from C++ test (TextFormatTest.StringEscape). - // Single divergence: we don't escape apostrophes. 
-			&pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")},
-			"string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n",
-		},
-		{
-			// Test data from the same C++ test.
-			&pb.Strings{StringField: proto.String("\350\260\267\346\255\214")},
-			"string_field: \"\\350\\260\\267\\346\\255\\214\"\n",
-		},
-		{
-			// Some UTF-8.
-			&pb.Strings{StringField: proto.String("\x00\x01\xff\x81")},
-			`string_field: "\000\001\377\201"` + "\n",
-		},
-	}
-
-	for i, tc := range testCases {
-		var buf bytes.Buffer
-		if err := proto.MarshalText(&buf, tc.in); err != nil {
-			t.Errorf("proto.MarshalText: %v", err)
-			continue
-		}
-		s := buf.String()
-		if s != tc.out {
-			t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out)
-			continue
-		}
-
-		// Check round-trip.
-		pb := new(pb.Strings)
-		if err := proto.UnmarshalText(s, pb); err != nil {
-			t.Errorf("#%d: UnmarshalText: %v", i, err)
-			continue
-		}
-		if !proto.Equal(pb, tc.in) {
-			t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb)
-		}
-	}
-}
-
-// A limitedWriter accepts some output before it fails.
-// This is a proxy for something like a nearly-full or imminently-failing disk,
-// or a network connection that is about to die.
-type limitedWriter struct {
-	b     bytes.Buffer
-	limit int
-}
-
-var outOfSpace = errors.New("proto: insufficient space")
-
-func (w *limitedWriter) Write(p []byte) (n int, err error) {
-	var avail = w.limit - w.b.Len()
-	if avail <= 0 {
-		return 0, outOfSpace
-	}
-	if len(p) <= avail {
-		return w.b.Write(p)
-	}
-	n, _ = w.b.Write(p[:avail])
-	return n, outOfSpace
-}
-
-func TestMarshalTextFailing(t *testing.T) {
-	// Try lots of different sizes to exercise more error code-paths.
-	for lim := 0; lim < len(text); lim++ {
-		buf := new(limitedWriter)
-		buf.limit = lim
-		err := proto.MarshalText(buf, newTestMessage())
-		// We expect a certain error, but also some partial results in the buffer.
-		if err != outOfSpace {
-			t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace)
-		}
-		s := buf.b.String()
-		x := text[:buf.limit]
-		if s != x {
-			t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x)
-		}
-	}
-}
-
-func TestFloats(t *testing.T) {
-	tests := []struct {
-		f    float64
-		want string
-	}{
-		{0, "0"},
-		{4.7, "4.7"},
-		{math.Inf(1), "inf"},
-		{math.Inf(-1), "-inf"},
-		{math.NaN(), "nan"},
-	}
-	for _, test := range tests {
-		msg := &pb.FloatingPoint{F: &test.f}
-		got := strings.TrimSpace(msg.String())
-		want := `f:` + test.want
-		if got != want {
-			t.Errorf("f=%f: got %q, want %q", test.f, got, want)
-		}
-	}
-}
-
-func TestRepeatedNilText(t *testing.T) {
-	m := &pb.MessageList{
-		Message: []*pb.MessageList_Message{
-			nil,
-			{
-				Name: proto.String("Horse"),
-			},
-			nil,
-		},
-	}
-	want := `Message <nil>
-Message {
-  name: "Horse"
-}
-Message <nil>
-`
-	if s := proto.MarshalTextString(m); s != want {
-		t.Errorf(" got: %s\nwant: %s", s, want)
-	}
-}
-
-func TestProto3Text(t *testing.T) {
-	tests := []struct {
-		m    proto.Message
-		want string
-	}{
-		// zero message
-		{&proto3pb.Message{}, ``},
-		// zero message except for an empty byte slice
-		{&proto3pb.Message{Data: []byte{}}, ``},
-		// trivial case
-		{&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`},
-		// empty map
-		{&pb.MessageWithMap{}, ``},
-		// non-empty map; map format is the same as a repeated struct,
-		// and they are sorted by key (numerically for numeric keys).
-		{
-			&pb.MessageWithMap{NameMapping: map[int32]string{
-				-1:      "Negatory",
-				7:       "Lucky",
-				1234:    "Feist",
-				6345789: "Otis",
-			}},
-			`name_mapping:<key:-1 value:"Negatory" > ` +
-				`name_mapping:<key:7 value:"Lucky" > ` +
-				`name_mapping:<key:1234 value:"Feist" > ` +
-				`name_mapping:<key:6345789 value:"Otis" >`,
-		},
-		// map with nil value; not well-defined, but we shouldn't crash
-		{
-			&pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}},
-			`msg_mapping:<key:7 >`,
-		},
-	}
-	for _, test := range tests {
-		got := strings.TrimSpace(test.m.String())
-		if got != test.want {
-			t.Errorf("\n got %s\nwant %s", got, test.want)
-		}
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile b/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile
deleted file mode 100644
index f1f06564a..000000000
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile
+++ /dev/null
@@ -1,43 +0,0 @@
-# Go support for Protocol Buffers - Google's data interchange format
-#
-# Copyright 2010 The Go Authors. All rights reserved.
-# https://github.com/golang/protobuf
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-install:
-	go install
-
-test: install generate-test-pbs
-	go test
-
-
-generate-test-pbs:
-	make install
-	make -C testdata
-	protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto
-	make
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go
deleted file mode 100644
index b787d58aa..000000000
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go
+++ /dev/null
@@ -1,2104 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "math" - "math/rand" - "reflect" - "runtime/debug" - "strings" - "testing" - "time" - - . "github.com/golang/protobuf/proto" - . "github.com/golang/protobuf/proto/testdata" -) - -var globalO *Buffer - -func old() *Buffer { - if globalO == nil { - globalO = NewBuffer(nil) - } - globalO.Reset() - return globalO -} - -func equalbytes(b1, b2 []byte, t *testing.T) { - if len(b1) != len(b2) { - t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2)) - return - } - for i := 0; i < len(b1); i++ { - if b1[i] != b2[i] { - t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2) - } - } -} - -func initGoTestField() *GoTestField { - f := new(GoTestField) - f.Label = String("label") - f.Type = String("type") - return f -} - -// These are all structurally equivalent but the tag numbers differ. -// (It's remarkable that required, optional, and repeated all have -// 8 letters.) 
-func initGoTest_RequiredGroup() *GoTest_RequiredGroup { - return &GoTest_RequiredGroup{ - RequiredField: String("required"), - } -} - -func initGoTest_OptionalGroup() *GoTest_OptionalGroup { - return &GoTest_OptionalGroup{ - RequiredField: String("optional"), - } -} - -func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { - return &GoTest_RepeatedGroup{ - RequiredField: String("repeated"), - } -} - -func initGoTest(setdefaults bool) *GoTest { - pb := new(GoTest) - if setdefaults { - pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) - pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) - pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) - pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) - pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) - pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) - pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) - pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) - pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) - pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) - pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted - pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) - pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) - } - - pb.Kind = GoTest_TIME.Enum() - pb.RequiredField = initGoTestField() - pb.F_BoolRequired = Bool(true) - pb.F_Int32Required = Int32(3) - pb.F_Int64Required = Int64(6) - pb.F_Fixed32Required = Uint32(32) - pb.F_Fixed64Required = Uint64(64) - pb.F_Uint32Required = Uint32(3232) - pb.F_Uint64Required = Uint64(6464) - pb.F_FloatRequired = Float32(3232) - pb.F_DoubleRequired = Float64(6464) - pb.F_StringRequired = String("string") - pb.F_BytesRequired = []byte("bytes") - pb.F_Sint32Required = Int32(-32) - pb.F_Sint64Required = Int64(-64) - pb.Requiredgroup = initGoTest_RequiredGroup() - - return pb -} - -func fail(msg string, b *bytes.Buffer, s string, t *testing.T) { - data := b.Bytes() - ld := len(data) - ls := len(s) / 2 - - fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls) - - // find the interesting spot - n - n := ls - if ld < ls { - n = ld - } - j := 0 - for i := 0; i < n; i++ { - bs := hex(s[j])*16 + hex(s[j+1]) - j += 2 - if data[i] == bs { - continue - } - n = i - break - } - l := n - 10 - if l < 0 { - l = 0 - } - h := n + 10 - - // find the interesting spot - n - fmt.Printf("is[%d]:", l) - for i := l; i < h; i++ { - if i >= ld { - fmt.Printf(" --") - continue - } - fmt.Printf(" %.2x", data[i]) - } - fmt.Printf("\n") - - fmt.Printf("sb[%d]:", l) - for i := l; i < h; i++ { - if i >= ls { - fmt.Printf(" --") - continue - } - bs := hex(s[j])*16 + hex(s[j+1]) - j += 2 - fmt.Printf(" %.2x", bs) - } - fmt.Printf("\n") - - t.Fail() - - // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes()) - // Print the output in a partially-decoded format; can - // be helpful when updating the test. It produces the output - // that is pasted, with minor edits, into the argument to verify(). 
- // data := b.Bytes() - // nesting := 0 - // for b.Len() > 0 { - // start := len(data) - b.Len() - // var u uint64 - // u, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on varint:", err) - // return - // } - // wire := u & 0x7 - // tag := u >> 3 - // switch wire { - // case WireVarint: - // v, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on varint:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireFixed32: - // v, err := DecodeFixed32(b) - // if err != nil { - // fmt.Printf("decode error on fixed32:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireFixed64: - // v, err := DecodeFixed64(b) - // if err != nil { - // fmt.Printf("decode error on fixed64:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireBytes: - // nb, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on bytes:", err) - // return - // } - // after_tag := len(data) - b.Len() - // str := make([]byte, nb) - // _, err = b.Read(str) - // if err != nil { - // fmt.Printf("decode error on bytes:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n", - // data[start:after_tag], str, tag, wire) - // case WireStartGroup: - // nesting++ - // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n", - // data[start:len(data)-b.Len()], tag, nesting) - // case WireEndGroup: - // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n", - // data[start:len(data)-b.Len()], tag, nesting) - // nesting-- - // default: - // fmt.Printf("unrecognized wire type %d\n", wire) - // return - // } - // } -} - -func hex(c uint8) uint8 { - if '0' <= c && c <= '9' { - return c - '0' - } - if 'a' <= c && c <= 'f' { - return 10 + c - 'a' - } - if 'A' <= c && c <= 'F' { - return 10 + c - 'A' - } - return 0 -} - -func equal(b []byte, s string, t *testing.T) bool { - if 2*len(b) != len(s) { - // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t) - fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s)) - return false - } - for i, j := 0, 0; i < len(b); i, j = i+1, j+2 { - x := hex(s[j])*16 + hex(s[j+1]) - if b[i] != x { - // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t) - fmt.Printf("bad byte[%d]:%x %x", i, b[i], x) - return false - } - } - return true -} - -func overify(t *testing.T, pb *GoTest, expected string) { - o := old() - err := o.Marshal(pb) - if err != nil { - fmt.Printf("overify marshal-1 err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("expected = %s", expected) - } - if !equal(o.Bytes(), expected, t) { - o.DebugPrint("overify neq 1", o.Bytes()) - t.Fatalf("expected = %s", expected) - } - - // Now test Unmarshal by recreating the original buffer. 
- pbd := new(GoTest) - err = o.Unmarshal(pbd) - if err != nil { - t.Fatalf("overify unmarshal err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("string = %s", expected) - } - o.Reset() - err = o.Marshal(pbd) - if err != nil { - t.Errorf("overify marshal-2 err = %v", err) - o.DebugPrint("", o.Bytes()) - t.Fatalf("string = %s", expected) - } - if !equal(o.Bytes(), expected, t) { - o.DebugPrint("overify neq 2", o.Bytes()) - t.Fatalf("string = %s", expected) - } -} - -// Simple tests for numeric encode/decode primitives (varint, etc.) -func TestNumericPrimitives(t *testing.T) { - for i := uint64(0); i < 1e6; i += 111 { - o := old() - if o.EncodeVarint(i) != nil { - t.Error("EncodeVarint") - break - } - x, e := o.DecodeVarint() - if e != nil { - t.Fatal("DecodeVarint") - } - if x != i { - t.Fatal("varint decode fail:", i, x) - } - - o = old() - if o.EncodeFixed32(i) != nil { - t.Fatal("encFixed32") - } - x, e = o.DecodeFixed32() - if e != nil { - t.Fatal("decFixed32") - } - if x != i { - t.Fatal("fixed32 decode fail:", i, x) - } - - o = old() - if o.EncodeFixed64(i*1234567) != nil { - t.Error("encFixed64") - break - } - x, e = o.DecodeFixed64() - if e != nil { - t.Error("decFixed64") - break - } - if x != i*1234567 { - t.Error("fixed64 decode fail:", i*1234567, x) - break - } - - o = old() - i32 := int32(i - 12345) - if o.EncodeZigzag32(uint64(i32)) != nil { - t.Fatal("EncodeZigzag32") - } - x, e = o.DecodeZigzag32() - if e != nil { - t.Fatal("DecodeZigzag32") - } - if x != uint64(uint32(i32)) { - t.Fatal("zigzag32 decode fail:", i32, x) - } - - o = old() - i64 := int64(i - 12345) - if o.EncodeZigzag64(uint64(i64)) != nil { - t.Fatal("EncodeZigzag64") - } - x, e = o.DecodeZigzag64() - if e != nil { - t.Fatal("DecodeZigzag64") - } - if x != uint64(i64) { - t.Fatal("zigzag64 decode fail:", i64, x) - } - } -} - -// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces. -type fakeMarshaler struct { - b []byte - err error -} - -func (f *fakeMarshaler) Marshal() ([]byte, error) { return f.b, f.err } -func (f *fakeMarshaler) String() string { return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) } -func (f *fakeMarshaler) ProtoMessage() {} -func (f *fakeMarshaler) Reset() {} - -type msgWithFakeMarshaler struct { - M *fakeMarshaler `protobuf:"bytes,1,opt,name=fake"` -} - -func (m *msgWithFakeMarshaler) String() string { return CompactTextString(m) } -func (m *msgWithFakeMarshaler) ProtoMessage() {} -func (m *msgWithFakeMarshaler) Reset() {} - -// Simple tests for proto messages that implement the Marshaler interface. -func TestMarshalerEncoding(t *testing.T) { - tests := []struct { - name string - m Message - want []byte - wantErr error - }{ - { - name: "Marshaler that fails", - m: &fakeMarshaler{ - err: errors.New("some marshal err"), - b: []byte{5, 6, 7}, - }, - // Since there's an error, nothing should be written to buffer. - want: nil, - wantErr: errors.New("some marshal err"), - }, - { - name: "Marshaler that fails with RequiredNotSetError", - m: &msgWithFakeMarshaler{ - M: &fakeMarshaler{ - err: &RequiredNotSetError{}, - b: []byte{5, 6, 7}, - }, - }, - // Since there's an error that can be continued after, - // the buffer should be written. 
- want: []byte{ - 10, 3, // for &msgWithFakeMarshaler - 5, 6, 7, // for &fakeMarshaler - }, - wantErr: &RequiredNotSetError{}, - }, - { - name: "Marshaler that succeeds", - m: &fakeMarshaler{ - b: []byte{0, 1, 2, 3, 4, 127, 255}, - }, - want: []byte{0, 1, 2, 3, 4, 127, 255}, - wantErr: nil, - }, - } - for _, test := range tests { - b := NewBuffer(nil) - err := b.Marshal(test.m) - if _, ok := err.(*RequiredNotSetError); ok { - // We're not in package proto, so we can only assert the type in this case. - err = &RequiredNotSetError{} - } - if !reflect.DeepEqual(test.wantErr, err) { - t.Errorf("%s: got err %v wanted %v", test.name, err, test.wantErr) - } - if !reflect.DeepEqual(test.want, b.Bytes()) { - t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want) - } - } -} - -// Simple tests for bytes -func TestBytesPrimitives(t *testing.T) { - o := old() - bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'} - if o.EncodeRawBytes(bytes) != nil { - t.Error("EncodeRawBytes") - } - decb, e := o.DecodeRawBytes(false) - if e != nil { - t.Error("DecodeRawBytes") - } - equalbytes(bytes, decb, t) -} - -// Simple tests for strings -func TestStringPrimitives(t *testing.T) { - o := old() - s := "now is the time" - if o.EncodeStringBytes(s) != nil { - t.Error("enc_string") - } - decs, e := o.DecodeStringBytes() - if e != nil { - t.Error("dec_string") - } - if s != decs { - t.Error("string encode/decode fail:", s, decs) - } -} - -// Do we catch the "required bit not set" case? -func TestRequiredBit(t *testing.T) { - o := old() - pb := new(GoTest) - err := o.Marshal(pb) - if err == nil { - t.Error("did not catch missing required fields") - } else if strings.Index(err.Error(), "Kind") < 0 { - t.Error("wrong error type:", err) - } -} - -// Check that all fields are nil. -// Clearly silly, and a residue from a more interesting test with an earlier, -// different initialization property, but it once caught a compiler bug so -// it lives. -func checkInitialized(pb *GoTest, t *testing.T) { - if pb.F_BoolDefaulted != nil { - t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted) - } - if pb.F_Int32Defaulted != nil { - t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted) - } - if pb.F_Int64Defaulted != nil { - t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted) - } - if pb.F_Fixed32Defaulted != nil { - t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted) - } - if pb.F_Fixed64Defaulted != nil { - t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted) - } - if pb.F_Uint32Defaulted != nil { - t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted) - } - if pb.F_Uint64Defaulted != nil { - t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted) - } - if pb.F_FloatDefaulted != nil { - t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted) - } - if pb.F_DoubleDefaulted != nil { - t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted) - } - if pb.F_StringDefaulted != nil { - t.Error("New or Reset did not set string:", *pb.F_StringDefaulted) - } - if pb.F_BytesDefaulted != nil { - t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted)) - } - if pb.F_Sint32Defaulted != nil { - t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted) - } - if pb.F_Sint64Defaulted != nil { - t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted) - } -} - -// Does Reset() reset? 
-func TestReset(t *testing.T) { - pb := initGoTest(true) - // muck with some values - pb.F_BoolDefaulted = Bool(false) - pb.F_Int32Defaulted = Int32(237) - pb.F_Int64Defaulted = Int64(12346) - pb.F_Fixed32Defaulted = Uint32(32000) - pb.F_Fixed64Defaulted = Uint64(666) - pb.F_Uint32Defaulted = Uint32(323232) - pb.F_Uint64Defaulted = nil - pb.F_FloatDefaulted = nil - pb.F_DoubleDefaulted = Float64(0) - pb.F_StringDefaulted = String("gotcha") - pb.F_BytesDefaulted = []byte("asdfasdf") - pb.F_Sint32Defaulted = Int32(123) - pb.F_Sint64Defaulted = Int64(789) - pb.Reset() - checkInitialized(pb, t) -} - -// All required fields set, no defaults provided. -func TestEncodeDecode1(t *testing.T) { - pb := initGoTest(false) - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 0x20 - "714000000000000000"+ // field 14, encoding 1, value 0x40 - "78a019"+ // field 15, encoding 0, value 0xca0 = 3232 - "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string" - "b304"+ // field 70, encoding 3, start group - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // field 70, encoding 4, end group - "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f") // field 103, encoding 0, 0x7f zigzag64 -} - -// All required fields set, defaults provided. 
-func TestEncodeDecode2(t *testing.T) { - pb := initGoTest(true) - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All default fields set to their default value by hand -func TestEncodeDecode3(t *testing.T) { - pb := initGoTest(false) - pb.F_BoolDefaulted = Bool(true) - pb.F_Int32Defaulted = Int32(32) - pb.F_Int64Defaulted = Int64(64) - pb.F_Fixed32Defaulted = Uint32(320) - pb.F_Fixed64Defaulted = Uint64(640) - pb.F_Uint32Defaulted = Uint32(3200) - pb.F_Uint64Defaulted = Uint64(6400) - pb.F_FloatDefaulted = Float32(314159) - pb.F_DoubleDefaulted = Float64(271828) - pb.F_StringDefaulted = String("hello, \"world!\"\n") - pb.F_BytesDefaulted = []byte("Bignose") - pb.F_Sint32Defaulted = Int32(-32) - pb.F_Sint64Defaulted = Int64(-64) - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 
271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, defaults provided, all non-defaulted optional fields have values. -func TestEncodeDecode4(t *testing.T) { - pb := initGoTest(true) - pb.Table = String("hello") - pb.Param = Int32(7) - pb.OptionalField = initGoTestField() - pb.F_BoolOptional = Bool(true) - pb.F_Int32Optional = Int32(32) - pb.F_Int64Optional = Int64(64) - pb.F_Fixed32Optional = Uint32(3232) - pb.F_Fixed64Optional = Uint64(6464) - pb.F_Uint32Optional = Uint32(323232) - pb.F_Uint64Optional = Uint64(646464) - pb.F_FloatOptional = Float32(32.) - pb.F_DoubleOptional = Float64(64.) - pb.F_StringOptional = String("hello") - pb.F_BytesOptional = []byte("Bignose") - pb.F_Sint32Optional = Int32(-32) - pb.F_Sint64Optional = Int64(-64) - pb.Optionalgroup = initGoTest_OptionalGroup() - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello" - "1807"+ // field 3, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "f00101"+ // field 30, encoding 0, value 1 - "f80120"+ // field 31, encoding 0, value 32 - "800240"+ // field 32, encoding 0, value 64 - "8d02a00c0000"+ // field 33, encoding 5, value 3232 - "91024019000000000000"+ // field 34, encoding 1, value 6464 - "9802a0dd13"+ // field 35, encoding 0, value 323232 - "a002c0ba27"+ // field 36, encoding 0, value 646464 - "ad0200000042"+ // field 37, encoding 5, value 32.0 - "b1020000000000005040"+ // field 38, encoding 1, value 64.0 - "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "d305"+ // start group field 90 level 1 - 
"da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional" - "d405"+ // end group field 90 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose" - "f0123f"+ // field 302, encoding 0, value 63 - "f8127f"+ // field 303, encoding 0, value 127 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, defaults provided, all repeated fields given two values. -func TestEncodeDecode5(t *testing.T) { - pb := initGoTest(true) - pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()} - pb.F_BoolRepeated = []bool{false, true} - pb.F_Int32Repeated = []int32{32, 33} - pb.F_Int64Repeated = []int64{64, 65} - pb.F_Fixed32Repeated = []uint32{3232, 3333} - pb.F_Fixed64Repeated = []uint64{6464, 6565} - pb.F_Uint32Repeated = []uint32{323232, 333333} - pb.F_Uint64Repeated = []uint64{646464, 656565} - pb.F_FloatRepeated = []float32{32., 33.} - pb.F_DoubleRepeated = []float64{64., 65.} - pb.F_StringRepeated = []string{"hello", "sailor"} - pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")} - pb.F_Sint32Repeated = []int32{32, -32} - pb.F_Sint64Repeated = []int64{64, -64} - pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()} - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) - "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "a00100"+ // field 20, encoding 0, value 0 - "a00101"+ // field 20, encoding 0, value 1 - "a80120"+ // field 21, encoding 0, value 32 - "a80121"+ // field 21, encoding 0, value 33 - "b00140"+ // field 22, encoding 0, value 64 - "b00141"+ // field 22, encoding 0, value 65 - "bd01a00c0000"+ // field 23, encoding 5, value 3232 - "bd01050d0000"+ // field 23, encoding 5, value 3333 - "c1014019000000000000"+ // field 24, encoding 1, value 6464 - "c101a519000000000000"+ // field 24, encoding 1, value 6565 - "c801a0dd13"+ // field 25, encoding 0, value 323232 - "c80195ac14"+ // field 25, encoding 0, value 333333 - "d001c0ba27"+ // field 26, encoding 0, value 646464 - "d001b58928"+ // field 26, encoding 0, value 656565 - "dd0100000042"+ // field 27, encoding 5, value 32.0 - "dd0100000442"+ // field 27, encoding 5, value 33.0 - "e1010000000000005040"+ // field 28, encoding 1, value 64.0 - "e1010000000000405040"+ // field 28, encoding 1, value 65.0 - "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello" - "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor" - "c00201"+ // field 40, encoding 0, value 1 - "c80220"+ // field 41, encoding 0, value 32 - "d00240"+ // field 
42, encoding 0, value 64 - "dd0240010000"+ // field 43, encoding 5, value 320 - "e1028002000000000000"+ // field 44, encoding 1, value 640 - "e8028019"+ // field 45, encoding 0, value 3200 - "f0028032"+ // field 46, encoding 0, value 6400 - "fd02e0659948"+ // field 47, encoding 5, value 314159.0 - "81030000000050971041"+ // field 48, encoding 1, value 271828.0 - "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "8305"+ // start group field 80 level 1 - "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" - "8405"+ // end group field 80 level 1 - "8305"+ // start group field 80 level 1 - "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" - "8405"+ // end group field 80 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "ca0c03"+"626967"+ // field 201, encoding 2, string "big" - "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose" - "d00c40"+ // field 202, encoding 0, value 32 - "d00c3f"+ // field 202, encoding 0, value -32 - "d80c8001"+ // field 203, encoding 0, value 64 - "d80c7f"+ // field 203, encoding 0, value -64 - "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" - "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 - -} - -// All required fields set, all packed repeated fields given two values. -func TestEncodeDecode6(t *testing.T) { - pb := initGoTest(false) - pb.F_BoolRepeatedPacked = []bool{false, true} - pb.F_Int32RepeatedPacked = []int32{32, 33} - pb.F_Int64RepeatedPacked = []int64{64, 65} - pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333} - pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565} - pb.F_Uint32RepeatedPacked = []uint32{323232, 333333} - pb.F_Uint64RepeatedPacked = []uint64{646464, 656565} - pb.F_FloatRepeatedPacked = []float32{32., 33.} - pb.F_DoubleRepeatedPacked = []float64{64., 65.} - pb.F_Sint32RepeatedPacked = []int32{32, -32} - pb.F_Sint64RepeatedPacked = []int64{64, -64} - - overify(t, pb, - "0807"+ // field 1, encoding 0, value 7 - "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) - "5001"+ // field 10, encoding 0, value 1 - "5803"+ // field 11, encoding 0, value 3 - "6006"+ // field 12, encoding 0, value 6 - "6d20000000"+ // field 13, encoding 5, value 32 - "714000000000000000"+ // field 14, encoding 1, value 64 - "78a019"+ // field 15, encoding 0, value 3232 - "8001c032"+ // field 16, encoding 0, value 6464 - "8d0100004a45"+ // field 17, encoding 5, value 3232.0 - "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 - "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" - "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1 - "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33 - "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65 - "aa0308"+ // field 53, encoding 2, 8 bytes - "a00c0000050d0000"+ // value 3232, value 3333 - "b20310"+ // field 54, encoding 2, 16 bytes - "4019000000000000a519000000000000"+ // value 6464, value 6565 - "ba0306"+ // field 55, encoding 2, 6 bytes - "a0dd1395ac14"+ // value 323232, value 333333 - "c20306"+ // field 56, encoding 2, 6 bytes - "c0ba27b58928"+ // value 646464, value 656565 - "ca0308"+ // field 57, 
encoding 2, 8 bytes - "0000004200000442"+ // value 32.0, value 33.0 - "d20310"+ // field 58, encoding 2, 16 bytes - "00000000000050400000000000405040"+ // value 64.0, value 65.0 - "b304"+ // start group field 70 level 1 - "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" - "b404"+ // end group field 70 level 1 - "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" - "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 - "b21f02"+ // field 502, encoding 2, 2 bytes - "403f"+ // value 32, value -32 - "ba1f03"+ // field 503, encoding 2, 3 bytes - "80017f") // value 64, value -64 -} - -// Test that we can encode empty bytes fields. -func TestEncodeDecodeBytes1(t *testing.T) { - pb := initGoTest(false) - - // Create our bytes - pb.F_BytesRequired = []byte{} - pb.F_BytesRepeated = [][]byte{{}} - pb.F_BytesOptional = []byte{} - - d, err := Marshal(pb) - if err != nil { - t.Error(err) - } - - pbd := new(GoTest) - if err := Unmarshal(d, pbd); err != nil { - t.Error(err) - } - - if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 { - t.Error("required empty bytes field is incorrect") - } - if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil { - t.Error("repeated empty bytes field is incorrect") - } - if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 { - t.Error("optional empty bytes field is incorrect") - } -} - -// Test that we encode nil-valued fields of a repeated bytes field correctly. -// Since entries in a repeated field cannot be nil, nil must mean empty value. -func TestEncodeDecodeBytes2(t *testing.T) { - pb := initGoTest(false) - - // Create our bytes - pb.F_BytesRepeated = [][]byte{nil} - - d, err := Marshal(pb) - if err != nil { - t.Error(err) - } - - pbd := new(GoTest) - if err := Unmarshal(d, pbd); err != nil { - t.Error(err) - } - - if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil { - t.Error("Unexpected value for repeated bytes field") - } -} - -// All required fields set, defaults provided, all repeated fields given two values. -func TestSkippingUnrecognizedFields(t *testing.T) { - o := old() - pb := initGoTestField() - - // Marshal it normally. - o.Marshal(pb) - - // Now new a GoSkipTest record. - skip := &GoSkipTest{ - SkipInt32: Int32(32), - SkipFixed32: Uint32(3232), - SkipFixed64: Uint64(6464), - SkipString: String("skipper"), - Skipgroup: &GoSkipTest_SkipGroup{ - GroupInt32: Int32(75), - GroupString: String("wxyz"), - }, - } - - // Marshal it into same buffer. - o.Marshal(skip) - - pbd := new(GoTestField) - o.Unmarshal(pbd) - - // The __unrecognized field should be a marshaling of GoSkipTest - skipd := new(GoSkipTest) - - o.SetBuf(pbd.XXX_unrecognized) - o.Unmarshal(skipd) - - if *skipd.SkipInt32 != *skip.SkipInt32 { - t.Error("skip int32", skipd.SkipInt32) - } - if *skipd.SkipFixed32 != *skip.SkipFixed32 { - t.Error("skip fixed32", skipd.SkipFixed32) - } - if *skipd.SkipFixed64 != *skip.SkipFixed64 { - t.Error("skip fixed64", skipd.SkipFixed64) - } - if *skipd.SkipString != *skip.SkipString { - t.Error("skip string", *skipd.SkipString) - } - if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 { - t.Error("skip group int32", skipd.Skipgroup.GroupInt32) - } - if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString { - t.Error("skip group string", *skipd.Skipgroup.GroupString) - } -} - -// Check that unrecognized fields of a submessage are preserved. 
-func TestSubmessageUnrecognizedFields(t *testing.T) { - nm := &NewMessage{ - Nested: &NewMessage_Nested{ - Name: String("Nigel"), - FoodGroup: String("carbs"), - }, - } - b, err := Marshal(nm) - if err != nil { - t.Fatalf("Marshal of NewMessage: %v", err) - } - - // Unmarshal into an OldMessage. - om := new(OldMessage) - if err := Unmarshal(b, om); err != nil { - t.Fatalf("Unmarshal to OldMessage: %v", err) - } - exp := &OldMessage{ - Nested: &OldMessage_Nested{ - Name: String("Nigel"), - // normal protocol buffer users should not do this - XXX_unrecognized: []byte("\x12\x05carbs"), - }, - } - if !Equal(om, exp) { - t.Errorf("om = %v, want %v", om, exp) - } - - // Clone the OldMessage. - om = Clone(om).(*OldMessage) - if !Equal(om, exp) { - t.Errorf("Clone(om) = %v, want %v", om, exp) - } - - // Marshal the OldMessage, then unmarshal it into an empty NewMessage. - if b, err = Marshal(om); err != nil { - t.Fatalf("Marshal of OldMessage: %v", err) - } - t.Logf("Marshal(%v) -> %q", om, b) - nm2 := new(NewMessage) - if err := Unmarshal(b, nm2); err != nil { - t.Fatalf("Unmarshal to NewMessage: %v", err) - } - if !Equal(nm, nm2) { - t.Errorf("NewMessage round-trip: %v => %v", nm, nm2) - } -} - -// Check that an int32 field can be upgraded to an int64 field. -func TestNegativeInt32(t *testing.T) { - om := &OldMessage{ - Num: Int32(-1), - } - b, err := Marshal(om) - if err != nil { - t.Fatalf("Marshal of OldMessage: %v", err) - } - - // Check the size. It should be 11 bytes; - // 1 for the field/wire type, and 10 for the negative number. - if len(b) != 11 { - t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b) - } - - // Unmarshal into a NewMessage. - nm := new(NewMessage) - if err := Unmarshal(b, nm); err != nil { - t.Fatalf("Unmarshal to NewMessage: %v", err) - } - want := &NewMessage{ - Num: Int64(-1), - } - if !Equal(nm, want) { - t.Errorf("nm = %v, want %v", nm, want) - } -} - -// Check that we can grow an array (repeated field) to have many elements. -// This test doesn't depend only on our encoding; for variety, it makes sure -// we create, encode, and decode the correct contents explicitly. It's therefore -// a bit messier. -// This test also uses (and hence tests) the Marshal/Unmarshal functions -// instead of the methods. -func TestBigRepeated(t *testing.T) { - pb := initGoTest(true) - - // Create the arrays - const N = 50 // Internally the library starts much smaller. - pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N) - pb.F_Sint64Repeated = make([]int64, N) - pb.F_Sint32Repeated = make([]int32, N) - pb.F_BytesRepeated = make([][]byte, N) - pb.F_StringRepeated = make([]string, N) - pb.F_DoubleRepeated = make([]float64, N) - pb.F_FloatRepeated = make([]float32, N) - pb.F_Uint64Repeated = make([]uint64, N) - pb.F_Uint32Repeated = make([]uint32, N) - pb.F_Fixed64Repeated = make([]uint64, N) - pb.F_Fixed32Repeated = make([]uint32, N) - pb.F_Int64Repeated = make([]int64, N) - pb.F_Int32Repeated = make([]int32, N) - pb.F_BoolRepeated = make([]bool, N) - pb.RepeatedField = make([]*GoTestField, N) - - // Fill in the arrays with checkable values. 
- igtf := initGoTestField() - igtrg := initGoTest_RepeatedGroup() - for i := 0; i < N; i++ { - pb.Repeatedgroup[i] = igtrg - pb.F_Sint64Repeated[i] = int64(i) - pb.F_Sint32Repeated[i] = int32(i) - s := fmt.Sprint(i) - pb.F_BytesRepeated[i] = []byte(s) - pb.F_StringRepeated[i] = s - pb.F_DoubleRepeated[i] = float64(i) - pb.F_FloatRepeated[i] = float32(i) - pb.F_Uint64Repeated[i] = uint64(i) - pb.F_Uint32Repeated[i] = uint32(i) - pb.F_Fixed64Repeated[i] = uint64(i) - pb.F_Fixed32Repeated[i] = uint32(i) - pb.F_Int64Repeated[i] = int64(i) - pb.F_Int32Repeated[i] = int32(i) - pb.F_BoolRepeated[i] = i%2 == 0 - pb.RepeatedField[i] = igtf - } - - // Marshal. - buf, _ := Marshal(pb) - - // Now test Unmarshal by recreating the original buffer. - pbd := new(GoTest) - Unmarshal(buf, pbd) - - // Check the checkable values - for i := uint64(0); i < N; i++ { - if pbd.Repeatedgroup[i] == nil { // TODO: more checking? - t.Error("pbd.Repeatedgroup bad") - } - var x uint64 - x = uint64(pbd.F_Sint64Repeated[i]) - if x != i { - t.Error("pbd.F_Sint64Repeated bad", x, i) - } - x = uint64(pbd.F_Sint32Repeated[i]) - if x != i { - t.Error("pbd.F_Sint32Repeated bad", x, i) - } - s := fmt.Sprint(i) - equalbytes(pbd.F_BytesRepeated[i], []byte(s), t) - if pbd.F_StringRepeated[i] != s { - t.Error("pbd.F_StringRepeated bad", pbd.F_StringRepeated[i], i) - } - x = uint64(pbd.F_DoubleRepeated[i]) - if x != i { - t.Error("pbd.F_DoubleRepeated bad", x, i) - } - x = uint64(pbd.F_FloatRepeated[i]) - if x != i { - t.Error("pbd.F_FloatRepeated bad", x, i) - } - x = pbd.F_Uint64Repeated[i] - if x != i { - t.Error("pbd.F_Uint64Repeated bad", x, i) - } - x = uint64(pbd.F_Uint32Repeated[i]) - if x != i { - t.Error("pbd.F_Uint32Repeated bad", x, i) - } - x = pbd.F_Fixed64Repeated[i] - if x != i { - t.Error("pbd.F_Fixed64Repeated bad", x, i) - } - x = uint64(pbd.F_Fixed32Repeated[i]) - if x != i { - t.Error("pbd.F_Fixed32Repeated bad", x, i) - } - x = uint64(pbd.F_Int64Repeated[i]) - if x != i { - t.Error("pbd.F_Int64Repeated bad", x, i) - } - x = uint64(pbd.F_Int32Repeated[i]) - if x != i { - t.Error("pbd.F_Int32Repeated bad", x, i) - } - if pbd.F_BoolRepeated[i] != (i%2 == 0) { - t.Error("pbd.F_BoolRepeated bad", x, i) - } - if pbd.RepeatedField[i] == nil { // TODO: more checking? - t.Error("pbd.RepeatedField bad") - } - } -} - -// Verify we give a useful message when decoding to the wrong structure type. -func TestTypeMismatch(t *testing.T) { - pb1 := initGoTest(true) - - // Marshal - o := old() - o.Marshal(pb1) - - // Now Unmarshal it to the wrong type.
- pb2 := initGoTestField() - err := o.Unmarshal(pb2) - if err == nil { - t.Error("expected error, got no error") - } else if !strings.Contains(err.Error(), "bad wiretype") { - t.Error("expected bad wiretype error, got", err) - } -} - -func encodeDecode(t *testing.T, in, out Message, msg string) { - buf, err := Marshal(in) - if err != nil { - t.Fatalf("failed marshaling %v: %v", msg, err) - } - if err := Unmarshal(buf, out); err != nil { - t.Fatalf("failed unmarshaling %v: %v", msg, err) - } -} - -func TestPackedNonPackedDecoderSwitching(t *testing.T) { - np, p := new(NonPackedTest), new(PackedTest) - - // non-packed -> packed - np.A = []int32{0, 1, 1, 2, 3, 5} - encodeDecode(t, np, p, "non-packed -> packed") - if !reflect.DeepEqual(np.A, p.B) { - t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B) - } - - // packed -> non-packed - np.Reset() - p.B = []int32{3, 1, 4, 1, 5, 9} - encodeDecode(t, p, np, "packed -> non-packed") - if !reflect.DeepEqual(p.B, np.A) { - t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A) - } -} - -func TestProto1RepeatedGroup(t *testing.T) { - pb := &MessageList{ - Message: []*MessageList_Message{ - { - Name: String("blah"), - Count: Int32(7), - }, - // NOTE: pb.Message[1] is a nil - nil, - }, - } - - o := old() - err := o.Marshal(pb) - if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") { - t.Fatalf("unexpected or no error when marshaling: %v", err) - } -} - -// Test that enums work. Checks for a bug introduced by making enums -// named types instead of int32: newInt32FromUint64 would crash with -// a type mismatch in reflect.PointTo. -func TestEnum(t *testing.T) { - pb := new(GoEnum) - pb.Foo = FOO_FOO1.Enum() - o := old() - if err := o.Marshal(pb); err != nil { - t.Fatal("error encoding enum:", err) - } - pb1 := new(GoEnum) - if err := o.Unmarshal(pb1); err != nil { - t.Fatal("error decoding enum:", err) - } - if *pb1.Foo != FOO_FOO1 { - t.Error("expected 7 but got ", *pb1.Foo) - } -} - -// Enum types have String methods. Check that enum fields can be printed. -// We don't care what the value actually is, just as long as it doesn't crash. -func TestPrintingNilEnumFields(t *testing.T) { - pb := new(GoEnum) - fmt.Sprintf("%+v", pb) -} - -// Verify that absent required fields cause Marshal/Unmarshal to return errors. -func TestRequiredFieldEnforcement(t *testing.T) { - pb := new(GoTestField) - _, err := Marshal(pb) - if err == nil { - t.Error("marshal: expected error, got nil") - } else if strings.Index(err.Error(), "Label") < 0 { - t.Errorf("marshal: bad error type: %v", err) - } - - // A slightly sneaky, yet valid, proto. It encodes the same required field twice, - // so simply counting the required fields is insufficient. - // field 1, encoding 2, value "hi" - buf := []byte("\x0A\x02hi\x0A\x02hi") - err = Unmarshal(buf, pb) - if err == nil { - t.Error("unmarshal: expected error, got nil") - } else if strings.Index(err.Error(), "{Unknown}") < 0 { - t.Errorf("unmarshal: bad error type: %v", err) - } -} - -func TestTypedNilMarshal(t *testing.T) { - // A typed nil should return ErrNil and not crash. - _, err := Marshal((*GoEnum)(nil)) - if err != ErrNil { - t.Errorf("Marshal: got err %v, want ErrNil", err) - } -} - -// A type that implements the Marshaler interface, but is not nillable. 
-type nonNillableInt uint64 - -func (nni nonNillableInt) Marshal() ([]byte, error) { - return EncodeVarint(uint64(nni)), nil -} - -type NNIMessage struct { - nni nonNillableInt -} - -func (*NNIMessage) Reset() {} -func (*NNIMessage) String() string { return "" } -func (*NNIMessage) ProtoMessage() {} - -// A type that implements the Marshaler interface and is nillable. -type nillableMessage struct { - x uint64 -} - -func (nm *nillableMessage) Marshal() ([]byte, error) { - return EncodeVarint(nm.x), nil -} - -type NMMessage struct { - nm *nillableMessage -} - -func (*NMMessage) Reset() {} -func (*NMMessage) String() string { return "" } -func (*NMMessage) ProtoMessage() {} - -// Verify a type that uses the Marshaler interface, but has a nil pointer. -func TestNilMarshaler(t *testing.T) { - // Try a struct with a Marshaler field that is nil. - // It should be directly marshable. - nmm := new(NMMessage) - if _, err := Marshal(nmm); err != nil { - t.Error("unexpected error marshaling nmm: ", err) - } - - // Try a struct with a Marshaler field that is not nillable. - nnim := new(NNIMessage) - nnim.nni = 7 - var _ Marshaler = nnim.nni // verify it is truly a Marshaler - if _, err := Marshal(nnim); err != nil { - t.Error("unexpected error marshaling nnim: ", err) - } -} - -func TestAllSetDefaults(t *testing.T) { - // Exercise SetDefaults with all scalar field types. - m := &Defaults{ - // NaN != NaN, so override that here. - F_Nan: Float32(1.7), - } - expected := &Defaults{ - F_Bool: Bool(true), - F_Int32: Int32(32), - F_Int64: Int64(64), - F_Fixed32: Uint32(320), - F_Fixed64: Uint64(640), - F_Uint32: Uint32(3200), - F_Uint64: Uint64(6400), - F_Float: Float32(314159), - F_Double: Float64(271828), - F_String: String(`hello, "world!"` + "\n"), - F_Bytes: []byte("Bignose"), - F_Sint32: Int32(-32), - F_Sint64: Int64(-64), - F_Enum: Defaults_GREEN.Enum(), - F_Pinf: Float32(float32(math.Inf(1))), - F_Ninf: Float32(float32(math.Inf(-1))), - F_Nan: Float32(1.7), - StrZero: String(""), - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultsWithSetField(t *testing.T) { - // Check that a set value is not overridden. 
- m := &Defaults{ - F_Int32: Int32(12), - } - SetDefaults(m) - if v := m.GetF_Int32(); v != 12 { - t.Errorf("m.FInt32 = %v, want 12", v) - } -} - -func TestSetDefaultsWithSubMessage(t *testing.T) { - m := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("gopher"), - }, - } - expected := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("gopher"), - Port: Int32(4000), - }, - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) { - m := &MyMessage{ - RepInner: []*InnerMessage{{}}, - } - expected := &MyMessage{ - RepInner: []*InnerMessage{{ - Port: Int32(4000), - }}, - } - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestSetDefaultWithRepeatedNonMessage(t *testing.T) { - m := &MyMessage{ - Pet: []string{"turtle", "wombat"}, - } - expected := Clone(m) - SetDefaults(m) - if !Equal(m, expected) { - t.Errorf("\n got %v\nwant %v", m, expected) - } -} - -func TestMaximumTagNumber(t *testing.T) { - m := &MaxTag{ - LastField: String("natural goat essence"), - } - buf, err := Marshal(m) - if err != nil { - t.Fatalf("proto.Marshal failed: %v", err) - } - m2 := new(MaxTag) - if err := Unmarshal(buf, m2); err != nil { - t.Fatalf("proto.Unmarshal failed: %v", err) - } - if got, want := m2.GetLastField(), *m.LastField; got != want { - t.Errorf("got %q, want %q", got, want) - } -} - -func TestJSON(t *testing.T) { - m := &MyMessage{ - Count: Int32(4), - Pet: []string{"bunny", "kitty"}, - Inner: &InnerMessage{ - Host: String("cauchy"), - }, - Bikeshed: MyMessage_GREEN.Enum(), - } - const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}` - - b, err := json.Marshal(m) - if err != nil { - t.Fatalf("json.Marshal failed: %v", err) - } - s := string(b) - if s != expected { - t.Errorf("got %s\nwant %s", s, expected) - } - - received := new(MyMessage) - if err := json.Unmarshal(b, received); err != nil { - t.Fatalf("json.Unmarshal failed: %v", err) - } - if !Equal(received, m) { - t.Fatalf("got %s, want %s", received, m) - } - - // Test unmarshalling of JSON with symbolic enum name. - const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}` - received.Reset() - if err := json.Unmarshal([]byte(old), received); err != nil { - t.Fatalf("json.Unmarshal failed: %v", err) - } - if !Equal(received, m) { - t.Fatalf("got %s, want %s", received, m) - } -} - -func TestBadWireType(t *testing.T) { - b := []byte{7<<3 | 6} // field 7, wire type 6 - pb := new(OtherMessage) - if err := Unmarshal(b, pb); err == nil { - t.Errorf("Unmarshal did not fail") - } else if !strings.Contains(err.Error(), "unknown wire type") { - t.Errorf("wrong error: %v", err) - } -} - -func TestBytesWithInvalidLength(t *testing.T) { - // If a byte sequence has an invalid (negative) length, Unmarshal should not panic. - b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0} - Unmarshal(b, new(MyMessage)) -} - -func TestLengthOverflow(t *testing.T) { - // Overflowing a length should not panic. - b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01} - Unmarshal(b, new(MyMessage)) -} - -func TestVarintOverflow(t *testing.T) { - // Overflowing a 64-bit length should not be allowed. 
- b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01} - if err := Unmarshal(b, new(MyMessage)); err == nil { - t.Fatalf("Overflowed uint64 length without error") - } -} - -func TestUnmarshalFuzz(t *testing.T) { - const N = 1000 - seed := time.Now().UnixNano() - t.Logf("RNG seed is %d", seed) - rng := rand.New(rand.NewSource(seed)) - buf := make([]byte, 20) - for i := 0; i < N; i++ { - for j := range buf { - buf[j] = byte(rng.Intn(256)) - } - fuzzUnmarshal(t, buf) - } -} - -func TestMergeMessages(t *testing.T) { - pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}} - data, err := Marshal(pb) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - pb1 := new(MessageList) - if err := Unmarshal(data, pb1); err != nil { - t.Fatalf("first Unmarshal: %v", err) - } - if err := Unmarshal(data, pb1); err != nil { - t.Fatalf("second Unmarshal: %v", err) - } - if len(pb1.Message) != 1 { - t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message)) - } - - pb2 := new(MessageList) - if err := UnmarshalMerge(data, pb2); err != nil { - t.Fatalf("first UnmarshalMerge: %v", err) - } - if err := UnmarshalMerge(data, pb2); err != nil { - t.Fatalf("second UnmarshalMerge: %v", err) - } - if len(pb2.Message) != 2 { - t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message)) - } -} - -func TestExtensionMarshalOrder(t *testing.T) { - m := &MyMessage{Count: Int(123)} - if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil { - t.Fatalf("SetExtension: %v", err) - } - if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil { - t.Fatalf("SetExtension: %v", err) - } - if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil { - t.Fatalf("SetExtension: %v", err) - } - - // Serialize m several times, and check we get the same bytes each time. - var orig []byte - for i := 0; i < 100; i++ { - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if i == 0 { - orig = b - continue - } - if !bytes.Equal(b, orig) { - t.Errorf("Bytes differ on attempt #%d", i) - } - } -} - -// Many extensions, because small maps might not iterate differently on each iteration. -var exts = []*ExtensionDesc{ - E_X201, - E_X202, - E_X203, - E_X204, - E_X205, - E_X206, - E_X207, - E_X208, - E_X209, - E_X210, - E_X211, - E_X212, - E_X213, - E_X214, - E_X215, - E_X216, - E_X217, - E_X218, - E_X219, - E_X220, - E_X221, - E_X222, - E_X223, - E_X224, - E_X225, - E_X226, - E_X227, - E_X228, - E_X229, - E_X230, - E_X231, - E_X232, - E_X233, - E_X234, - E_X235, - E_X236, - E_X237, - E_X238, - E_X239, - E_X240, - E_X241, - E_X242, - E_X243, - E_X244, - E_X245, - E_X246, - E_X247, - E_X248, - E_X249, - E_X250, -} - -func TestMessageSetMarshalOrder(t *testing.T) { - m := &MyMessageSet{} - for _, x := range exts { - if err := SetExtension(m, x, &Empty{}); err != nil { - t.Fatalf("SetExtension: %v", err) - } - } - - buf, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - // Serialize m several times, and check we get the same bytes each time. 
- for i := 0; i < 10; i++ { - b1, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if !bytes.Equal(b1, buf) { - t.Errorf("Bytes differ on re-Marshal #%d", i) - } - - m2 := &MyMessageSet{} - if err := Unmarshal(buf, m2); err != nil { - t.Errorf("Unmarshal: %v", err) - } - b2, err := Marshal(m2) - if err != nil { - t.Errorf("re-Marshal: %v", err) - } - if !bytes.Equal(b2, buf) { - t.Errorf("Bytes differ on round-trip #%d", i) - } - } -} - -func TestUnmarshalMergesMessages(t *testing.T) { - // If a nested message occurs twice in the input, - // the fields should be merged when decoding. - a := &OtherMessage{ - Key: Int64(123), - Inner: &InnerMessage{ - Host: String("polhode"), - Port: Int32(1234), - }, - } - aData, err := Marshal(a) - if err != nil { - t.Fatalf("Marshal(a): %v", err) - } - b := &OtherMessage{ - Weight: Float32(1.2), - Inner: &InnerMessage{ - Host: String("herpolhode"), - Connected: Bool(true), - }, - } - bData, err := Marshal(b) - if err != nil { - t.Fatalf("Marshal(b): %v", err) - } - want := &OtherMessage{ - Key: Int64(123), - Weight: Float32(1.2), - Inner: &InnerMessage{ - Host: String("herpolhode"), - Port: Int32(1234), - Connected: Bool(true), - }, - } - got := new(OtherMessage) - if err := Unmarshal(append(aData, bData...), got); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if !Equal(got, want) { - t.Errorf("\n got %v\nwant %v", got, want) - } -} - -func TestEncodingSizes(t *testing.T) { - tests := []struct { - m Message - n int - }{ - {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6}, - {&Defaults{F_Int32: Int32(math.MinInt32)}, 11}, - {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6}, - {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6}, - } - for _, test := range tests { - b, err := Marshal(test.m) - if err != nil { - t.Errorf("Marshal(%v): %v", test.m, err) - continue - } - if len(b) != test.n { - t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n) - } - } -} - -func TestRequiredNotSetError(t *testing.T) { - pb := initGoTest(false) - pb.RequiredField.Label = nil - pb.F_Int32Required = nil - pb.F_Int64Required = nil - - expected := "0807" + // field 1, encoding 0, value 7 - "2206" + "120474797065" + // field 4, encoding 2 (GoTestField) - "5001" + // field 10, encoding 0, value 1 - "6d20000000" + // field 13, encoding 5, value 0x20 - "714000000000000000" + // field 14, encoding 1, value 0x40 - "78a019" + // field 15, encoding 0, value 0xca0 = 3232 - "8001c032" + // field 16, encoding 0, value 0x1940 = 6464 - "8d0100004a45" + // field 17, encoding 5, value 3232.0 - "9101000000000040b940" + // field 18, encoding 1, value 6464.0 - "9a0106" + "737472696e67" + // field 19, encoding 2, string "string" - "b304" + // field 70, encoding 3, start group - "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required" - "b404" + // field 70, encoding 4, end group - "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes" - "b0063f" + // field 102, encoding 0, 0x3f zigzag32 - "b8067f" // field 103, encoding 0, 0x7f zigzag64 - - o := old() - bytes, err := Marshal(pb) - if _, ok := err.(*RequiredNotSetError); !ok { - fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) - t.Fatalf("expected = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.Label") < 0 { - t.Errorf("marshal-1 wrong err msg: %v", err) - } - if !equal(bytes, expected, t) { - o.DebugPrint("neq 1", bytes) - t.Fatalf("expected = %s", expected) - } - - // Now test Unmarshal by 
recreating the original buffer. - pbd := new(GoTest) - err = Unmarshal(bytes, pbd) - if _, ok := err.(*RequiredNotSetError); !ok { - t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) - t.Fatalf("string = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 { - t.Errorf("unmarshal wrong err msg: %v", err) - } - bytes, err = Marshal(pbd) - if _, ok := err.(*RequiredNotSetError); !ok { - t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) - t.Fatalf("string = %s", expected) - } - if strings.Index(err.Error(), "RequiredField.Label") < 0 { - t.Errorf("marshal-2 wrong err msg: %v", err) - } - if !equal(bytes, expected, t) { - o.DebugPrint("neq 2", bytes) - t.Fatalf("string = %s", expected) - } -} - -func fuzzUnmarshal(t *testing.T, data []byte) { - defer func() { - if e := recover(); e != nil { - t.Errorf("These bytes caused a panic: %+v", data) - t.Logf("Stack:\n%s", debug.Stack()) - t.FailNow() - } - }() - - pb := new(MyMessage) - Unmarshal(data, pb) -} - -func TestMapFieldMarshal(t *testing.T) { - m := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Rob", - 4: "Ian", - 8: "Dave", - }, - } - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - // b should be the concatenation of these three byte sequences in some order. - parts := []string{ - "\n\a\b\x01\x12\x03Rob", - "\n\a\b\x04\x12\x03Ian", - "\n\b\b\x08\x12\x04Dave", - } - ok := false - for i := range parts { - for j := range parts { - if j == i { - continue - } - for k := range parts { - if k == i || k == j { - continue - } - try := parts[i] + parts[j] + parts[k] - if bytes.Equal(b, []byte(try)) { - ok = true - break - } - } - } - } - if !ok { - t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2]) - } - t.Logf("FYI b: %q", b) - - (new(Buffer)).DebugPrint("Dump of b", b) -} - -func TestMapFieldRoundTrips(t *testing.T) { - m := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Rob", - 4: "Ian", - 8: "Dave", - }, - MsgMapping: map[int64]*FloatingPoint{ - 0x7001: &FloatingPoint{F: Float64(2.0)}, - }, - ByteMapping: map[bool][]byte{ - false: []byte("that's not right!"), - true: []byte("aye, 'tis true!"), - }, - } - b, err := Marshal(m) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - t.Logf("FYI b: %q", b) - m2 := new(MessageWithMap) - if err := Unmarshal(b, m2); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - for _, pair := range [][2]interface{}{ - {m.NameMapping, m2.NameMapping}, - {m.MsgMapping, m2.MsgMapping}, - {m.ByteMapping, m2.ByteMapping}, - } { - if !reflect.DeepEqual(pair[0], pair[1]) { - t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1]) - } - } -} - -func TestMapFieldWithNil(t *testing.T) { - m := &MessageWithMap{ - MsgMapping: map[int64]*FloatingPoint{ - 1: nil, - }, - } - b, err := Marshal(m) - if err == nil { - t.Fatalf("Marshal of bad map should have failed, got these bytes: %v", b) - } -} - -// Benchmarks - -func testMsg() *GoTest { - pb := initGoTest(true) - const N = 1000 // Internally the library starts much smaller. 
- pb.F_Int32Repeated = make([]int32, N) - pb.F_DoubleRepeated = make([]float64, N) - for i := 0; i < N; i++ { - pb.F_Int32Repeated[i] = int32(i) - pb.F_DoubleRepeated[i] = float64(i) - } - return pb -} - -func bytesMsg() *GoTest { - pb := initGoTest(true) - buf := make([]byte, 4000) - for i := range buf { - buf[i] = byte(i) - } - pb.F_BytesDefaulted = buf - return pb -} - -func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) { - d, _ := marshal(pb) - b.SetBytes(int64(len(d))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - marshal(pb) - } -} - -func benchmarkBufferMarshal(b *testing.B, pb Message) { - p := NewBuffer(nil) - benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { - p.Reset() - err := p.Marshal(pb0) - return p.Bytes(), err - }) -} - -func benchmarkSize(b *testing.B, pb Message) { - benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { - Size(pb) - return nil, nil - }) -} - -func newOf(pb Message) Message { - in := reflect.ValueOf(pb) - if in.IsNil() { - return pb - } - return reflect.New(in.Type().Elem()).Interface().(Message) -} - -func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) { - d, _ := Marshal(pb) - b.SetBytes(int64(len(d))) - pbd := newOf(pb) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - unmarshal(d, pbd) - } -} - -func benchmarkBufferUnmarshal(b *testing.B, pb Message) { - p := NewBuffer(nil) - benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error { - p.SetBuf(d) - return p.Unmarshal(pb0) - }) -} - -// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes} - -func BenchmarkMarshal(b *testing.B) { - benchmarkMarshal(b, testMsg(), Marshal) -} - -func BenchmarkBufferMarshal(b *testing.B) { - benchmarkBufferMarshal(b, testMsg()) -} - -func BenchmarkSize(b *testing.B) { - benchmarkSize(b, testMsg()) -} - -func BenchmarkUnmarshal(b *testing.B) { - benchmarkUnmarshal(b, testMsg(), Unmarshal) -} - -func BenchmarkBufferUnmarshal(b *testing.B) { - benchmarkBufferUnmarshal(b, testMsg()) -} - -func BenchmarkMarshalBytes(b *testing.B) { - benchmarkMarshal(b, bytesMsg(), Marshal) -} - -func BenchmarkBufferMarshalBytes(b *testing.B) { - benchmarkBufferMarshal(b, bytesMsg()) -} - -func BenchmarkSizeBytes(b *testing.B) { - benchmarkSize(b, bytesMsg()) -} - -func BenchmarkUnmarshalBytes(b *testing.B) { - benchmarkUnmarshal(b, bytesMsg(), Unmarshal) -} - -func BenchmarkBufferUnmarshalBytes(b *testing.B) { - benchmarkBufferUnmarshal(b, bytesMsg()) -} - -func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) { - b.StopTimer() - pb := initGoTestField() - skip := &GoSkipTest{ - SkipInt32: Int32(32), - SkipFixed32: Uint32(3232), - SkipFixed64: Uint64(6464), - SkipString: String("skipper"), - Skipgroup: &GoSkipTest_SkipGroup{ - GroupInt32: Int32(75), - GroupString: String("wxyz"), - }, - } - - pbd := new(GoTestField) - p := NewBuffer(nil) - p.Marshal(pb) - p.Marshal(skip) - p2 := NewBuffer(nil) - - b.StartTimer() - for i := 0; i < b.N; i++ { - p2.SetBuf(p.Bytes()) - p2.Unmarshal(pbd) - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go deleted file mode 100644 index 915a68b8e..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go +++ /dev/null @@ -1,212 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: MessageSet and RawMessage. - -package proto - -import ( - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(pb Message) Message { - in := reflect.ValueOf(pb) - if in.IsNil() { - return pb - } - - out := reflect.New(in.Type().Elem()) - // out is empty so a merge is a deep copy. - mergeStruct(out.Elem(), in.Elem()) - return out.Interface().(Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") - } - if in.IsNil() { - // Merging nil into non-nil is a quiet no-op - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := in.Addr().Interface().(extendableProto); ok { - emOut := out.Addr().Interface().(extendableProto) - mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap()) - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). 
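// A sketch of the contract mergeAny implements, using the generated test
// types exercised in clone_test.go (proto2 pointer fields); illustrative,
// with values chosen to show each rule:
//
//	dst := &pb.MyMessage{Name: proto.String("Dave"), Pet: []string{"bunny"}}
//	src := &pb.MyMessage{Count: proto.Int32(42), Pet: []string{"horsey"}}
//	proto.Merge(dst, src)
//	// dst: Name="Dave" (unset in src, kept), Count=42 (set in src, copied),
//	// Pet=["bunny", "horsey"] (repeated, appended)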
-func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go deleted file mode 100644 index a1c697bc8..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go +++ /dev/null @@ -1,245 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "testing" - - "github.com/golang/protobuf/proto" - - proto3pb "github.com/golang/protobuf/proto/proto3_proto" - pb "github.com/golang/protobuf/proto/testdata" -) - -var cloneTestMessage = &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &pb.InnerMessage{ - Host: proto.String("niles"), - Port: proto.Int32(9099), - Connected: proto.Bool(true), - }, - Others: []*pb.OtherMessage{ - { - Value: []byte("some bytes"), - }, - }, - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, -} - -func init() { - ext := &pb.Ext{ - Data: proto.String("extension"), - } - if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil { - panic("SetExtension: " + err.Error()) - } -} - -func TestClone(t *testing.T) { - m := proto.Clone(cloneTestMessage).(*pb.MyMessage) - if !proto.Equal(m, cloneTestMessage) { - t.Errorf("Clone(%v) = %v", cloneTestMessage, m) - } - - // Verify it was a deep copy. - *m.Inner.Port++ - if proto.Equal(m, cloneTestMessage) { - t.Error("Mutating clone changed the original") - } - // Byte fields and repeated fields should be copied. 
- if &m.Pet[0] == &cloneTestMessage.Pet[0] { - t.Error("Pet: repeated field not copied") - } - if &m.Others[0] == &cloneTestMessage.Others[0] { - t.Error("Others: repeated field not copied") - } - if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] { - t.Error("Others[0].Value: bytes field not copied") - } - if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] { - t.Error("RepBytes: repeated field not copied") - } - if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] { - t.Error("RepBytes[0]: bytes field not copied") - } -} - -func TestCloneNil(t *testing.T) { - var m *pb.MyMessage - if c := proto.Clone(m); !proto.Equal(m, c) { - t.Errorf("Clone(%v) = %v", m, c) - } -} - -var mergeTests = []struct { - src, dst, want proto.Message -}{ - { - src: &pb.MyMessage{ - Count: proto.Int32(42), - }, - dst: &pb.MyMessage{ - Name: proto.String("Dave"), - }, - want: &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - }, - }, - { - src: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("hey"), - Connected: proto.Bool(true), - }, - Pet: []string{"horsey"}, - Others: []*pb.OtherMessage{ - { - Value: []byte("some bytes"), - }, - }, - }, - dst: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("niles"), - Port: proto.Int32(9099), - }, - Pet: []string{"bunny", "kitty"}, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(31415926535), - }, - { - // Explicitly test a src=nil field - Inner: nil, - }, - }, - }, - want: &pb.MyMessage{ - Inner: &pb.InnerMessage{ - Host: proto.String("hey"), - Connected: proto.Bool(true), - Port: proto.Int32(9099), - }, - Pet: []string{"bunny", "kitty", "horsey"}, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(31415926535), - }, - {}, - { - Value: []byte("some bytes"), - }, - }, - }, - }, - { - src: &pb.MyMessage{ - RepBytes: [][]byte{[]byte("wow")}, - }, - dst: &pb.MyMessage{ - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham")}, - }, - want: &pb.MyMessage{ - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(6), - }, - RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, - }, - }, - // Check that a scalar bytes field replaces rather than appends. - { - src: &pb.OtherMessage{Value: []byte("foo")}, - dst: &pb.OtherMessage{Value: []byte("bar")}, - want: &pb.OtherMessage{Value: []byte("foo")}, - }, - { - src: &pb.MessageWithMap{ - NameMapping: map[int32]string{6: "Nigel"}, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, - }, - ByteMapping: map[bool][]byte{true: []byte("wowsa")}, - }, - dst: &pb.MessageWithMap{ - NameMapping: map[int32]string{ - 6: "Bruce", // should be overwritten - 7: "Andrew", - }, - }, - want: &pb.MessageWithMap{ - NameMapping: map[int32]string{ - 6: "Nigel", - 7: "Andrew", - }, - MsgMapping: map[int64]*pb.FloatingPoint{ - 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, - }, - ByteMapping: map[bool][]byte{true: []byte("wowsa")}, - }, - }, - // proto3 shouldn't merge zero values, - // in the same way that proto2 shouldn't merge nils. 
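// (A proto3 scalar carries no presence bit, so a zero value is
// indistinguishable from unset; the case below checks that an empty but
// non-nil Data in src does not clobber the bytes already in dst.)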
- { - src: &proto3pb.Message{ - Name: "Aaron", - Data: []byte(""), // zero value, but not nil - }, - dst: &proto3pb.Message{ - HeightInCm: 176, - Data: []byte("texas!"), - }, - want: &proto3pb.Message{ - Name: "Aaron", - HeightInCm: 176, - Data: []byte("texas!"), - }, - }, -} - -func TestMerge(t *testing.T) { - for _, m := range mergeTests { - got := proto.Clone(m.dst) - proto.Merge(got, m.src) - if !proto.Equal(got, m.want) { - t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want) - } - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index bf71dcad1..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,827 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" - "os" - "reflect" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. 
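// A self-contained sketch of the varint and zigzag transforms described
// here, assuming only the wire-format rules (seven payload bits per byte,
// least-significant group first, high bit marks continuation; zigzag keeps
// small negatives small for sint32/sint64). Illustrative names, not part
// of the package API.
func varintSketch(x uint64) []byte {
	var out []byte
	for x >= 0x80 {
		out = append(out, byte(x&0x7F)|0x80) // low seven bits, continuation bit set
		x >>= 7
	}
	return append(out, byte(x)) // final byte has the high bit clear
}

func zigzag64(v int64) uint64   { return uint64(v<<1) ^ uint64(v>>63) } // 0,-1,1,-2 -> 0,1,2,3
func unzigzag64(u uint64) int64 { return int64(u>>1) ^ -int64(u&1) }

// e.g. varintSketch(300) == []byte{0xac, 0x02} and zigzag64(-2) == 3.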
-func DecodeVarint(buf []byte) (x uint64, n int) { - // x, n already 0 - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - // x, err already 0 - - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. 
-// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - return UnmarshalMerge(buf, pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. 
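// (Messages implementing Unmarshaler decode themselves and skip the
// reflection path entirely; everything else falls through to the
// reflection-driven unmarshalType below.)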
- if u, ok := pb.(Unmarshaler); ok { - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. -func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - ext := e.ExtensionMap()[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - e.ExtensionMap()[int32(tag)] = ext - } - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. - return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. -const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. 
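// A sketch of the slab trick the decoders below use for pointer-valued
// scalars: carve results out of a shared slice and re-slice it, so a
// single make() serves boolPoolSize fields instead of one heap allocation
// per decoded *bool. The helper name is illustrative.
func takeBool(pool *[]bool, v bool) *bool {
	if len(*pool) == 0 {
		*pool = make([]bool, boolPoolSize) // refill the slab
	}
	p := &(*pool)[0]
	*p = v
	*pool = (*pool)[1:] // hand out the next slot next time
	return p
}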
-func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. -func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - - y := *v - for i := 0; i < nb; i++ { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). -func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. 
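// A worked instance of the packed layout that dec_slice_packed_int32
// below consumes: one wire-type-2 key, a byte-length varint, then the
// element varints back to back with no per-element keys. Field number 5
// and the values [3, 270] are illustrative.
func packedFieldExample() []byte {
	key := byte(5<<3 | WireBytes)       // 0x2a: field 5, wire type 2 (length-delimited)
	payload := []byte{0x03, 0x8e, 0x02} // varints 3 and 270
	return append([]byte{key, byte(len(payload))}, payload...)
}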
-func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. -func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. - keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. - for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. 
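// For example, the NameMapping entry {4: "Ian"} from the map tests
// arrives as
//   0x0a 0x07               field 1, wire type 2, entry is 7 bytes
//   0x08 0x04               tag 1 varint: key 4
//   0x12 0x03 'I' 'a' 'n'   tag 2 bytes: value "Ian"
// so each iteration below reads exactly one single-byte tagcode.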
- tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() || !valelem.IsValid() { - // We did not decode the key or the value in the map entry. - // Either way, it's an invalid map entry. - return fmt.Errorf("proto: bad map data: missing key/val") - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. -func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). -func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - - return err -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index 91f3f0784..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,1293 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "fmt" - "reflect" - "sort" -) - -// RequiredNotSetError is the error returned if Marshal is called with -// a protocol buffer struct whose required fields have not -// all been initialized. It is also the error returned if Unmarshal is -// called with an encoded protocol buffer that does not include all the -// required fields. -// -// When printed, RequiredNotSetError reports the first unset required field in a -// message. If the field cannot be precisely determined, it is reported as -// "{Unknown}". -type RequiredNotSetError struct { - field string -} - -func (e *RequiredNotSetError) Error() string { - return fmt.Sprintf("proto: required field %q not set", e.field) -} - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. 
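// fieldOneVarint drives the Buffer encoder defined below by hand: a key
// for field 1 with the varint wire type, then the value 150. A sketch of
// what the generated encoders do via p.tagcode; the helper name is
// illustrative.
func fieldOneVarint() []byte {
	b := NewBuffer(nil)
	b.EncodeVarint(uint64(1<<3 | WireVarint)) // key: 0x08
	b.EncodeVarint(150)                       // value: 0x96 0x01
	return b.Bytes()                          // {0x08, 0x96, 0x01}
}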
-func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -func sizeFixed64(x uint64) int { - return 8 -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -func sizeFixed32(x uint64) int { - return 4 -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - var state errorState - if err != nil && !state.shouldContinue(err, nil) { - return nil, err - } - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? 
- if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - if err != nil { - return err - } - p.buf = append(p.buf, data...) - return nil - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - stats.Encode++ - } - - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - stats.Size++ - } - - return -} - -// Individual type encoders. - -// Encode a bool. -func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) 
- return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. -func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) 
- return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) 
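The enc_slice_packed_* encoders above all share one layout: the elements are varint-encoded into a scratch buffer first, then emitted as a single length-delimited field (key, payload byte count, payload). A minimal standalone sketch of that layout — the helper name and the use of encoding/binary are illustrative, not part of this package, and it assumes non-negative values (the encoders above sign-extend negative int32s to the full 64-bit range):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// packVarints emits one packed repeated field: the key (field<<3 | wire
// type 2), the payload byte length, then each element as a varint.
func packVarints(field uint64, vals []uint64) []byte {
	var payload []byte
	for _, v := range vals {
		payload = binary.AppendUvarint(payload, v)
	}
	out := binary.AppendUvarint(nil, field<<3|2) // 2 = length-delimited (WireBytes)
	out = binary.AppendUvarint(out, uint64(len(payload)))
	return append(out, payload...)
}

func main() {
	// field 3, values 1, 150, 3 -> 1a 04 01 96 01 03
	fmt.Printf("% x\n", packVarints(3, []uint64{1, 150, 3}))
}
```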
- o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += len(p.tagcode) - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. -func (o *Buffer) enc_map(p *Properties, base structPointer) error { - v := *structPointer_ExtMap(base, p.field) - if err := encodeExtensionMap(v); err != nil { - return err - } - // Fast-path for common cases: zero or one extensions. - if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) - } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := *structPointer_ExtMap(base, p.field) - return sizeExtensionMap(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? 
- - /* - A map defined as - map<key_type, value_type> map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil { - return err - } - return nil - } - - keys := v.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := v.MapIndex(key) - - // The only illegal map entry values are nil message pointers. - if val.Kind() == reflect.Ptr && val.IsNil() { - return errors.New("proto: map has nil element") - } - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) - if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. - - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering.
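enc_new_map above leans on the equivalence spelled out in its comment: each map entry is marshaled as a two-field submessage, itself wrapped as a length-delimited field. A hand-rolled sketch of the bytes it produces for one map[int32]string entry — the helper is illustrative, not part of this package, and it assumes a non-negative key:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeMapEntry builds the wire form of one entry of map field n: a
// length-delimited MapFieldEntry whose field 1 is the key (varint) and
// whose field 2 is the value (length-delimited string).
func encodeMapEntry(n, key uint64, val string) []byte {
	var entry []byte
	entry = binary.AppendUvarint(entry, 1<<3|0) // key: field 1, varint
	entry = binary.AppendUvarint(entry, key)
	entry = binary.AppendUvarint(entry, 2<<3|2) // value: field 2, bytes
	entry = binary.AppendUvarint(entry, uint64(len(val)))
	entry = append(entry, val...)

	out := binary.AppendUvarint(nil, n<<3|2) // the map field's own key
	out = binary.AppendUvarint(out, uint64(len(entry)))
	return append(out, entry...)
}

func main() {
	// map field 1, entry {1: "Ken"} -> 0a 07 08 01 12 03 4b 65 6e
	fmt.Printf("% x\n", encodeMapEntry(1, 1, "Ken"))
}
```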
- // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. - reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." 
+ reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go deleted file mode 100644 index d8673a3e9..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go +++ /dev/null @@ -1,256 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. -// TODO: MessageSet. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal (a "bytes" field, - although represented by []byte, is not a repeated field) - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. 
-*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - b1, ok := f1.Interface().(raw) - if ok { - b2 := f2.Interface().(raw) - // RawMessage - if !bytes.Equal(b1.Bytes(), b2.Bytes()) { - return false - } - continue - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - if !bytes.Equal(u1, u2) { - return false - } - - return true -} - -// v1 and v2 are known to have the same type. -func equalAny(v1, v2 reflect.Value) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2) { - return false - } - } - return true - case reflect.Ptr: - return equalAny(v1.Elem(), v2.Elem()) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i)) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// em1 and em2 are extension maps. -func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 != nil && m2 != nil { - // Both are unencoded. 
- if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - continue - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { - return false - } - } - - return true -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go deleted file mode 100644 index b322f65ab..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go +++ /dev/null @@ -1,191 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "testing" - - . "github.com/golang/protobuf/proto" - pb "github.com/golang/protobuf/proto/testdata" -) - -// Four identical base messages. -// The init function adds extensions to some of them. -var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)} -var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)} -var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)} -var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)} - -// Two messages with non-message extensions. 
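The Equal semantics spelled out above reduce to recursive comparison of corresponding fields. A minimal usage sketch, reusing the generated testdata types that the tests below exercise:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	a := &pb.GoTestField{Label: proto.String("foo")}
	b := &pb.GoTestField{Label: proto.String("foo")}
	fmt.Println(proto.Equal(a, b)) // true: same type, set fields match

	b.Label = proto.String("bar")
	fmt.Println(proto.Equal(a, b)) // false: a corresponding field differs

	fmt.Println(proto.Equal(a, &pb.GoEnum{})) // false: different types
}
```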
-var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)} -var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)} - -func init() { - ext1 := &pb.Ext{Data: String("Kirk")} - ext2 := &pb.Ext{Data: String("Picard")} - - // messageWithExtension1a has ext1, but never marshals it. - if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil { - panic("SetExtension on 1a failed: " + err.Error()) - } - - // messageWithExtension1b is the unmarshaled form of messageWithExtension1a. - if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil { - panic("SetExtension on 1b failed: " + err.Error()) - } - buf, err := Marshal(messageWithExtension1b) - if err != nil { - panic("Marshal of 1b failed: " + err.Error()) - } - messageWithExtension1b.Reset() - if err := Unmarshal(buf, messageWithExtension1b); err != nil { - panic("Unmarshal of 1b failed: " + err.Error()) - } - - // messageWithExtension2 has ext2. - if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil { - panic("SetExtension on 2 failed: " + err.Error()) - } - - if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil { - panic("SetExtension on Int32-1 failed: " + err.Error()) - } - if err := SetExtension(messageWithInt32Extension2, pb.E_Ext_Number, Int32(24)); err != nil { - panic("SetExtension on Int32-2 failed: " + err.Error()) - } -} - -var EqualTests = []struct { - desc string - a, b Message - exp bool -}{ - {"different types", &pb.GoEnum{}, &pb.GoTestField{}, false}, - {"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true}, - {"nil vs nil", nil, nil, true}, - {"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true}, - {"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false}, - {"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false}, - - {"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false}, - {"one set field zero, one unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false}, - {"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false}, - {"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true}, - - {"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false}, - {"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false}, - {"repeated, different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false}, - {"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true}, - {"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true}, - {"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true}, - {"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true}, - - { - "nested, different", - &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}}, - &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}}, - false, - }, - { - "nested, equal", - &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, - &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, - true, - }, - - {"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true}, - {"bytes, empty",
&pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true}, - {"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false}, - { - "repeated bytes", - &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}}, - &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}}, - true, - }, - - {"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false}, - {"extension vs. same extension", messageWithExtension1a, messageWithExtension1b, true}, - {"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false}, - - {"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true}, - {"int32 extension vs. a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false}, - - { - "message with group", - &pb.MyMessage{ - Count: Int32(1), - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: Int32(5), - }, - }, - &pb.MyMessage{ - Count: Int32(1), - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: Int32(5), - }, - }, - true, - }, - - { - "map same", - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - true, - }, - { - "map different entry", - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - &pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob"}}, - false, - }, - { - "map different key only", - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - &pb.MessageWithMap{NameMapping: map[int32]string{2: "Ken"}}, - false, - }, - { - "map different value only", - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, - &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob"}}, - false, - }, -} - -func TestEqual(t *testing.T) { - for _, tc := range EqualTests { - if res := Equal(tc.a, tc.b); res != tc.exp { - t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp) - } - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go deleted file mode 100644 index e591ccef7..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go +++ /dev/null @@ -1,400 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base extendableProto, id int32, b []byte) { - base.ExtensionMap()[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - // Check the extended type. - if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b { - return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. 
-type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m. -func encodeExtensionMap(m map[int32]Extension) error { - for k, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - m[k] = e - } - return nil -} - -func sizeExtensionMap(m map[int32]Extension) (n int) { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - n += props.size(props, toStructPointer(x)) - } - return -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb extendableProto, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - _, ok := pb.ExtensionMap()[extension.Field] - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb extendableProto, extension *ExtensionDesc) { - // TODO: Check types, field numbers, etc.? - delete(pb.ExtensionMap(), extension.Field) -} - -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. -func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { - if err := checkExtensionTypes(pb, extension); err != nil { - return nil, err - } - - emap := pb.ExtensionMap() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. 
- return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - - t := reflect.TypeOf(extension.ExtensionType) - rep := extension.repeated() - - props := extensionProperties(extension) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. - value := reflect.New(t).Elem() - - for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { - return nil, err - } - - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { - return nil, err - } - - if !rep || o.index >= len(o.buf) { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, ok := pb.(extendableProto) - if !ok { - err = errors.New("proto: not an extendable proto") - return - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// SetExtension sets the specified extension of pb to the specified value. 
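GetExtension above decodes lazily and caches the decoded value back into the extension map, so repeated calls return the same value (the stability property exercised by the tests later in this diff). A minimal round-trip sketch using the same generated testdata types, as I read the flow from the code above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	msg := &pb.MyMessage{Count: proto.Int32(4)}
	// Attach the extension value; it stays unencoded until Marshal.
	if err := proto.SetExtension(msg, pb.E_Ext_More, &pb.Ext{Data: proto.String("hi")}); err != nil {
		log.Fatal(err)
	}
	// GetExtension returns the decoded (or cached) value as interface{}.
	ext, err := proto.GetExtension(msg, pb.E_Ext_More)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*ext.(*pb.Ext).Data)                    // "hi"
	fmt.Println(proto.HasExtension(msg, pb.E_Ext_More)) // true
}
```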
-func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { - if err := checkExtensionTypes(pb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go deleted file mode 100644 index 72552767d..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go +++ /dev/null @@ -1,292 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "fmt" - "reflect" - "testing" - - "github.com/golang/protobuf/proto" - pb "github.com/golang/protobuf/proto/testdata" -) - -func TestGetExtensionsWithMissingExtensions(t *testing.T) { - msg := &pb.MyMessage{} - ext1 := &pb.Ext{} - if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { - t.Fatalf("Could not set ext1: %s", ext1) - } - exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{ - pb.E_Ext_More, - pb.E_Ext_Text, - }) - if err != nil { - t.Fatalf("GetExtensions() failed: %s", err) - } - if exts[0] != ext1 { - t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0]) - } - if exts[1] != nil { - t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1]) - } -} - -func TestGetExtensionStability(t *testing.T) { - check := func(m *pb.MyMessage) bool { - ext1, err := proto.GetExtension(m, pb.E_Ext_More) - if err != nil { - t.Fatalf("GetExtension() failed: %s", err) - } - ext2, err := proto.GetExtension(m, pb.E_Ext_More) - if err != nil { - t.Fatalf("GetExtension() failed: %s", err) - } - return ext1 == ext2 - } - msg := &pb.MyMessage{Count: proto.Int32(4)} - ext0 := &pb.Ext{} - if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil { - t.Fatalf("Could not set ext1: %s", ext0) - } - if !check(msg) { - t.Errorf("GetExtension() not stable before marshaling") - } - bb, err := proto.Marshal(msg) - if err != nil { - t.Fatalf("Marshal() failed: %s", err) - } - msg1 := &pb.MyMessage{} - err = proto.Unmarshal(bb, msg1) - if err != nil { - t.Fatalf("Unmarshal() failed: %s", err) - } - if !check(msg1) { - t.Errorf("GetExtension() not stable after unmarshaling") - } -} - -func TestGetExtensionDefaults(t *testing.T) { - var setFloat64 float64 = 1 - var setFloat32 float32 = 2 - var setInt32 int32 = 3 - var setInt64 int64 = 4 - var setUint32 uint32 = 5 - var setUint64 uint64 = 6 - var setBool = true - var setBool2 = false - var setString = "Goodnight string" - var setBytes = []byte("Goodnight bytes") - var setEnum = pb.DefaultsMessage_TWO - - type testcase struct { - ext *proto.ExtensionDesc // Extension we are testing. - want interface{} // Expected value of extension, or nil (meaning that GetExtension will fail). - def interface{} // Expected value of extension after ClearExtension(). 
- } - tests := []testcase{ - {pb.E_NoDefaultDouble, setFloat64, nil}, - {pb.E_NoDefaultFloat, setFloat32, nil}, - {pb.E_NoDefaultInt32, setInt32, nil}, - {pb.E_NoDefaultInt64, setInt64, nil}, - {pb.E_NoDefaultUint32, setUint32, nil}, - {pb.E_NoDefaultUint64, setUint64, nil}, - {pb.E_NoDefaultSint32, setInt32, nil}, - {pb.E_NoDefaultSint64, setInt64, nil}, - {pb.E_NoDefaultFixed32, setUint32, nil}, - {pb.E_NoDefaultFixed64, setUint64, nil}, - {pb.E_NoDefaultSfixed32, setInt32, nil}, - {pb.E_NoDefaultSfixed64, setInt64, nil}, - {pb.E_NoDefaultBool, setBool, nil}, - {pb.E_NoDefaultBool, setBool2, nil}, - {pb.E_NoDefaultString, setString, nil}, - {pb.E_NoDefaultBytes, setBytes, nil}, - {pb.E_NoDefaultEnum, setEnum, nil}, - {pb.E_DefaultDouble, setFloat64, float64(3.1415)}, - {pb.E_DefaultFloat, setFloat32, float32(3.14)}, - {pb.E_DefaultInt32, setInt32, int32(42)}, - {pb.E_DefaultInt64, setInt64, int64(43)}, - {pb.E_DefaultUint32, setUint32, uint32(44)}, - {pb.E_DefaultUint64, setUint64, uint64(45)}, - {pb.E_DefaultSint32, setInt32, int32(46)}, - {pb.E_DefaultSint64, setInt64, int64(47)}, - {pb.E_DefaultFixed32, setUint32, uint32(48)}, - {pb.E_DefaultFixed64, setUint64, uint64(49)}, - {pb.E_DefaultSfixed32, setInt32, int32(50)}, - {pb.E_DefaultSfixed64, setInt64, int64(51)}, - {pb.E_DefaultBool, setBool, true}, - {pb.E_DefaultBool, setBool2, true}, - {pb.E_DefaultString, setString, "Hello, string"}, - {pb.E_DefaultBytes, setBytes, []byte("Hello, bytes")}, - {pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE}, - } - - checkVal := func(test testcase, msg *pb.DefaultsMessage, valWant interface{}) error { - val, err := proto.GetExtension(msg, test.ext) - if err != nil { - if valWant != nil { - return fmt.Errorf("GetExtension(): %s", err) - } - if want := proto.ErrMissingExtension; err != want { - return fmt.Errorf("Unexpected error: got %v, want %v", err, want) - } - return nil - } - - // All proto2 extension values are either a pointer to a value or a slice of values. - ty := reflect.TypeOf(val) - tyWant := reflect.TypeOf(test.ext.ExtensionType) - if got, want := ty, tyWant; got != want { - return fmt.Errorf("unexpected reflect.TypeOf(): got %v want %v", got, want) - } - tye := ty.Elem() - tyeWant := tyWant.Elem() - if got, want := tye, tyeWant; got != want { - return fmt.Errorf("unexpected reflect.TypeOf().Elem(): got %v want %v", got, want) - } - - // Check the name of the type of the value. - // If it is an enum it will be type int32 with the name of the enum. - if got, want := tye.Name(), tyeWant.Name(); got != want { - return fmt.Errorf("unexpected reflect.TypeOf().Elem().Name(): got %v want %v", got, want) - } - - // Check that value is what we expect. - // If we have a pointer in val, get the value it points to. - valExp := val - if ty.Kind() == reflect.Ptr { - valExp = reflect.ValueOf(val).Elem().Interface() - } - if got, want := valExp, valWant; !reflect.DeepEqual(got, want) { - return fmt.Errorf("unexpected reflect.DeepEqual(): got %v want %v", got, want) - } - - return nil - } - - setTo := func(test testcase) interface{} { - setTo := reflect.ValueOf(test.want) - if typ := reflect.TypeOf(test.ext.ExtensionType); typ.Kind() == reflect.Ptr { - setTo = reflect.New(typ).Elem() - setTo.Set(reflect.New(setTo.Type().Elem())) - setTo.Elem().Set(reflect.ValueOf(test.want)) - } - return setTo.Interface() - } - - for _, test := range tests { - msg := &pb.DefaultsMessage{} - name := test.ext.Name - - // Check the initial value.
- if err := checkVal(test, msg, test.def); err != nil { - t.Errorf("%s: %v", name, err) - } - - // Set the per-type value and check value. - name = fmt.Sprintf("%s (set to %T %v)", name, test.want, test.want) - if err := proto.SetExtension(msg, test.ext, setTo(test)); err != nil { - t.Errorf("%s: SetExtension(): %v", name, err) - continue - } - if err := checkVal(test, msg, test.want); err != nil { - t.Errorf("%s: %v", name, err) - continue - } - - // Set and check the value. - name += " (cleared)" - proto.ClearExtension(msg, test.ext) - if err := checkVal(test, msg, test.def); err != nil { - t.Errorf("%s: %v", name, err) - } - } -} - -func TestExtensionsRoundTrip(t *testing.T) { - msg := &pb.MyMessage{} - ext1 := &pb.Ext{ - Data: proto.String("hi"), - } - ext2 := &pb.Ext{ - Data: proto.String("there"), - } - exists := proto.HasExtension(msg, pb.E_Ext_More) - if exists { - t.Error("Extension More present unexpectedly") - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { - t.Error(err) - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil { - t.Error(err) - } - e, err := proto.GetExtension(msg, pb.E_Ext_More) - if err != nil { - t.Error(err) - } - x, ok := e.(*pb.Ext) - if !ok { - t.Errorf("e has type %T, expected testdata.Ext", e) - } else if *x.Data != "there" { - t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x) - } - proto.ClearExtension(msg, pb.E_Ext_More) - if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension { - t.Errorf("got %v, expected ErrMissingExtension", e) - } - if _, err := proto.GetExtension(msg, pb.E_X215); err == nil { - t.Error("expected bad extension error, got nil") - } - if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil { - t.Error("expected extension err") - } - if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil { - t.Error("expected some sort of type mismatch error, got nil") - } -} - -func TestNilExtension(t *testing.T) { - msg := &pb.MyMessage{ - Count: proto.Int32(1), - } - if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil { - t.Fatal(err) - } - if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil { - t.Error("expected SetExtension to fail due to a nil extension") - } else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want { - t.Errorf("expected error %v, got %v", want, err) - } - // Note: if the behavior of Marshal is ever changed to ignore nil extensions, update - // this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal. -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index 95f7975dd..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,841 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -The simplest way to describe this is to see an example. 
-Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - -package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. 
-type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // write point - - // pools of basic types to amortize allocation. - bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. -func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. 
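// A minimal usage sketch for the helper routines above: optional scalar
// fields are pointers, and Bool/Int32/Float64/String turn literals into
// settable values. Note is a made-up struct standing in for generated code.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

type Note struct {
	Title *string
	Count *int32
	Ratio *float64
}

func main() {
	n := Note{
		Title: proto.String("hello"), // instead of: s := "hello"; n.Title = &s
		Count: proto.Int32(3),
		Ratio: proto.Float64(0.5),
	}
	fmt.Println(*n.Title, *n.Count, *n.Ratio) // hello 3 0.5
}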
-func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults.
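// A short sketch of EnumName and UnmarshalJSONEnum in use, with ad-hoc maps
// in place of the FOO_name/FOO_value maps that generated code provides. Both
// JSON spellings of an enum value decode, the symbolic and the numeric one.
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
)

var (
	fooName  = map[int32]string{17: "X"}
	fooValue = map[string]int32{"X": 17}
)

func main() {
	fmt.Println(proto.EnumName(fooName, 17)) // X
	fmt.Println(proto.EnumName(fooName, 99)) // 99 (unknown values fall back to the number)

	for _, in := range []string{`"X"`, `17`} {
		v, err := proto.UnmarshalJSONEnum(fooValue, []byte(in), "FOO")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s -> %d\n", in, v) // both yield 17
	}
}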
-// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. - switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. 
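// The defaults map above is read under an RLock and populated under the
// write lock, a common cache shape for per-type metadata. A self-contained
// sketch of that pattern (names are ours, not the library's):
package main

import (
	"fmt"
	"reflect"
	"sync"
)

var (
	cacheMu sync.RWMutex
	cache   = make(map[reflect.Type]string)
)

func describe(t reflect.Type) string {
	cacheMu.RLock()
	d, ok := cache[t]
	cacheMu.RUnlock()
	if ok {
		return d // fast path: metadata already built
	}
	// Stand-in for the expensive per-type work (cf. buildDefaultMessage).
	d = fmt.Sprintf("%v with %d fields", t.Kind(), t.NumField())
	cacheMu.Lock()
	cache[t] = d // a racing goroutine may store the same value; that is benign
	cacheMu.Unlock()
	return d
}

func main() {
	type point struct{ X, Y int }
	fmt.Println(describe(reflect.TypeOf(point{}))) // struct with 2 fields
}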
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. -func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. 
-// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index 9d912bce1..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,287 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
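// mapKeys above makes map encoding deterministic by sorting keys, numerically
// where the kind allows it. A plain sketch of the same idea for one concrete
// key type, using only the standard library:
package main

import (
	"fmt"
	"sort"
)

func main() {
	m := map[int32]string{10: "ten", 2: "two", 33: "thirty-three"}
	keys := make([]int32, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	// Numeric, not textual, order: 2 < 10 < 33 (textually "10" < "2").
	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
	for _, k := range keys {
		fmt.Println(k, m[k])
	}
}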
- -package proto - -/* - * Support for message sets. - */ - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" -) - -// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var ErrNoMessageTypeId = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and MessageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. -// -// When a proto1 proto has a field that looks like: -// optional message info = 3; -// the protocol compiler produces a field in the generated struct that looks like: -// Info *_proto_.MessageSet `protobuf:"bytes,3,opt,name=info"` -// The package is automatically inserted so there is no need for that proto file to -// import this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type MessageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure MessageSet is a Message. -var _ Message = (*MessageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *MessageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *MessageSet) Has(pb Message) bool { - if ms.find(pb) != nil { - return true - } - return false -} - -func (ms *MessageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return ErrNoMessageTypeId - } - return nil // TODO: return error instead? -} - -func (ms *MessageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return ErrNoMessageTypeId - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *MessageSet) Reset() { *ms = MessageSet{} } -func (ms *MessageSet) String() string { return CompactTextString(ms) } -func (*MessageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { - if err := encodeExtensionMap(m); err != nil { - return nil, err - } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. 
- ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. - msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { - ms := new(MessageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - if i > 0 { - b.WriteByte(',') - } - - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. - return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. 
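// UnmarshalMessageSet above rebuilds each item's key as
// uint64(id)<<3 | WireBytes: the low three bits of a protobuf key carry the
// wire type and the remaining bits the field number. A standalone sketch of
// that packing, with a local varint encoder mirroring what EncodeVarint does:
package main

import "fmt"

const wireBytes = 2 // length-delimited wire type

// putVarint emits x in base-128 little-endian groups with continuation bits.
func putVarint(x uint64) []byte {
	var b []byte
	for x >= 0x80 {
		b = append(b, byte(x)|0x80)
		x >>= 7
	}
	return append(b, byte(x))
}

func main() {
	const fieldNum = 12345
	key := uint64(fieldNum)<<3 | wireBytes
	fmt.Printf("key=%d encoded=% x\n", key, putVarint(key))
	fmt.Println("field:", key>>3, "wire:", key&7) // 12345 and 2 again
}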
-func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go deleted file mode 100644 index 7c29bccf4..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "bytes" - "testing" -) - -func TestUnmarshalMessageSetWithDuplicate(t *testing.T) { - // Check that a repeated message set entry will be concatenated. - in := &MessageSet{ - Item: []*_MessageSet_Item{ - {TypeId: Int32(12345), Message: []byte("hoo")}, - {TypeId: Int32(12345), Message: []byte("hah")}, - }, - } - b, err := Marshal(in) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - t.Logf("Marshaled bytes: %q", b) - - m := make(map[int32]Extension) - if err := UnmarshalMessageSet(b, m); err != nil { - t.Fatalf("UnmarshalMessageSet: %v", err) - } - ext, ok := m[12345] - if !ok { - t.Fatalf("Didn't retrieve extension 12345; map is %v", m) - } - // Skip wire type/field number and length varints. - got := skipVarint(skipVarint(ext.enc)) - if want := []byte("hoohah"); !bytes.Equal(got, want) { - t.Errorf("Combined extension is %q, want %q", got, want) - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index 749919d25..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,479 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build appengine - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "math" - "reflect" -) - -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} - -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. 
- if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) -} - -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} -} - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { - v reflect.Value -} - -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) -} - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. 
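// structPointer_field above reaches a field through FieldByIndex, which
// follows a path of indices through nested structs; taking Addr of the
// result yields the *T that the typed accessors assert. A self-contained
// sketch of that reflect pattern:
package main

import (
	"fmt"
	"reflect"
)

type inner struct{ N int32 }

type outer struct {
	Name string
	In   inner
}

func main() {
	o := &outer{}
	v := reflect.ValueOf(o).Elem() // addressable because we start from a pointer

	f := v.FieldByIndex([]int{1, 0}) // outer.In is field 1, inner.N is field 0
	f.SetInt(42)

	p := f.Addr().Interface().(*int32) // the same *T trick as structPointer_ifield
	*p = 43
	fmt.Println(o.In.N) // 43
}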
-// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value -} - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() -} - -// Set sets p to point at a newly allocated word with bits set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} -} - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value -} - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} -} - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. 
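// word32 above funnels int32, uint32, float32, and enum fields through one
// uint32-shaped code path, reinterpreting floats with math.Float32bits and
// math.Float32frombits. A standalone sketch of those bit-level round trips:
package main

import (
	"fmt"
	"math"
)

func main() {
	f := float32(3.25)
	bits := math.Float32bits(f) // the IEEE-754 pattern as a uint32
	fmt.Printf("%v -> %#08x -> %v\n", f, bits, math.Float32frombits(bits))

	i := int32(-5)
	u := uint32(i)        // two's-complement bits, as word32_Get produces
	fmt.Println(int32(u)) // -5 again, lossless both ways
}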
-type word32Slice struct { - v reflect.Value -} - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } -} - -func (p word32Slice) Len() int { - return p.v.Len() -} - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} -} - -// word64 is like word32 but for 64-bit values. -type word64 struct { - v reflect.Value -} - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") -} - -func word64_IsNil(p word64) bool { - return p.v.IsNil() -} - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} -} - -// word64Val is like word32Val but for 64-bit values. 
-type word64Val struct { - v reflect.Value -} - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) - return - } - panic("unreachable") -} - -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} -} - -type word64Slice struct { - v reflect.Value -} - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(x)) - case reflect.Uint64: - elem.SetUint(x) - case reflect.Float64: - elem.SetFloat(math.Float64frombits(x)) - } -} - -func (p word64Slice) Len() int { - return p.v.Len() -} - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index e9be0fe92..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,266 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !appengine - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "unsafe" -) - -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. - -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. -func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != ^field(0) -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringVal returns the address of a string field in the struct. 
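// The accessors above add a field's byte offset to the struct's base address
// and cast the sum back to a typed pointer. A self-contained sketch of the
// same pattern via unsafe.Offsetof; it only mirrors the technique, and
// ordinary code should use plain field access or reflect instead:
package main

import (
	"fmt"
	"unsafe"
)

type record struct {
	ID   int32
	Name string
}

func main() {
	r := record{ID: 7}
	base := unsafe.Pointer(&r)
	off := unsafe.Offsetof(r.Name) // byte offset of Name inside record

	namep := (*string)(unsafe.Pointer(uintptr(base) + off))
	*namep = "written through the raw pointer"
	fmt.Println(r.ID, r.Name)
}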
-func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil -} - -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] -} - -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p -} - -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x -} - -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p -} - -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Slice is a slice of 32-bit values. -type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. 
-func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// word64 is like word32 but for 64-bit values. -type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] -} - -func word64_IsNil(p word64) bool { - return *p == nil -} - -func word64_Get(p word64) uint64 { - return **p -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x -} - -func word64Val_Get(p word64Val) uint64 { - return *p -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 - -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } - -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go deleted file mode 100644 index d74844ab2..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go +++ /dev/null @@ -1,742 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. 
- */ - -import ( - "fmt" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. 
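// tagMap above is a slice-first lookup: tags below tagMapFastLimit index an
// []int directly and only large tags pay for a map. A standalone sketch of
// the same hybrid, with -1 as the unused-slot sentinel just as in tagMap:
package main

import "fmt"

const fastLimit = 1024

type hybrid struct {
	fast []int
	slow map[int]int
}

func (h *hybrid) put(t, v int) {
	if t > 0 && t < fastLimit {
		for len(h.fast) < t+1 {
			h.fast = append(h.fast, -1) // grow, filling gaps with the sentinel
		}
		h.fast[t] = v
		return
	}
	if h.slow == nil {
		h.slow = make(map[int]int)
	}
	h.slow[t] = v
}

func (h *hybrid) get(t int) (int, bool) {
	if t > 0 && t < fastLimit {
		if t >= len(h.fast) {
			return 0, false
		}
		v := h.fast[t]
		return v, v >= 0
	}
	v, ok := h.slow[t]
	return v, ok
}

func main() {
	var h hybrid
	h.put(3, 30)
	h.put(5000, 50)          // beyond fastLimit, lands in the map
	fmt.Println(h.get(3))    // 30 true
	fmt.Println(h.get(5000)) // 50 true
	fmt.Println(h.get(9))    // 0 false
}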
-type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field; set for []byte only - - Default string // default value - HasDefault bool // whether an explicit default was provided - def_uint64 uint64 - - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool - - mtype reflect.Type // set for map types only - mkeyprop *Properties // set for map types only - mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder -} - -// String formats the properties in the protobuf struct field tag style. -func (p *Properties) String() string { - s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - if p.OrigName != p.Name { - s += ",name=" + p.OrigName - } - if p.proto3 { - s += ",proto3" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below.
- if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint - case "fixed32": - p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 - case "fixed64": - p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 - case "zigzag32": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 - case "zigzag64": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break - } - } - } -} - -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// Initialize the fields for encoding and decoding. 
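// Worked example (editorial sketch, not in the original source): given the
// sample tag string from Parse's comment above, "bytes,49,opt,name=foo,def=hello!",
// Parse would set approximately:
//
//	p.Wire = "bytes"     // fields[0]
//	p.WireType = WireBytes
//	p.Tag = 49           // fields[1]
//	p.Optional = true    // "opt"
//	p.OrigName = "foo"   // "name=foo"
//	p.HasDefault = true
//	p.Default = "hello!" // "def=" consumes the rest of the string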
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil - - switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - case reflect.Int32: - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.String: - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: - p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } - } - - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 
reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_byte - p.dec = (*Buffer).dec_slice_byte - p.size = size_slice_byte - // This is a []byte, which is either a bytes field, - // or the value of a map field. In the latter case, - // we always encode an empty []byte, so we should not - // use the proto3 enc/size funcs. - // f == nil iff this is the key/value of a map field. - if p.proto3 && f != nil { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break - case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte - } - } - - case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map - - p.mtype = t1 - p.mkeyprop = &Properties{} - p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.mvalprop = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. 
- vtype = reflect.PtrTo(vtype) - } - p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() -) - -// isMarshaler reports whether type t implements Marshaler. -func isMarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isMarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isMarshaler") - } - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isUnmarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isUnmarshaler") - } - return t.Implements(unmarshalerType) -} - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if f != nil { - p.field = toField(f) - } - if tag == "" { - return - } - p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - if collectStats { - stats.Chit++ - } - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } - return prop - } - if collectStats { - stats.Cmiss++ - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. 
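// Worked example (editorial sketch): the tagcode precalculated above is the
// varint encoding of (Tag<<3 | wire). For Tag=49 with wire type WireBytes (2):
// x = 49<<3 | 2 = 394 = 0b1_1000_1010. The loop emits the low 7 bits first
// with the continuation bit set, so p.tagcode = []byte{0x8a, 0x03}.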
- propertiesMap[t] = prop - - // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) - prop.unrecField = invalidField - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - if f.Name == "XXX_extensions" { // special case - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } - if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// Return the Properties object for the x[0]'th field of the structure. -func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go deleted file mode 100644 index 37c778209..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go +++ /dev/null @@ -1,122 +0,0 @@ -// Code generated by protoc-gen-go. -// source: proto3_proto/proto3.proto -// DO NOT EDIT! - -/* -Package proto3_proto is a generated protocol buffer package. 
- -It is generated from these files: - proto3_proto/proto3.proto - -It has these top-level messages: - Message - Nested - MessageWithMap -*/ -package proto3_proto - -import proto "github.com/golang/protobuf/proto" -import testdata "github.com/golang/protobuf/proto/testdata" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal - -type Message_Humour int32 - -const ( - Message_UNKNOWN Message_Humour = 0 - Message_PUNS Message_Humour = 1 - Message_SLAPSTICK Message_Humour = 2 - Message_BILL_BAILEY Message_Humour = 3 -) - -var Message_Humour_name = map[int32]string{ - 0: "UNKNOWN", - 1: "PUNS", - 2: "SLAPSTICK", - 3: "BILL_BAILEY", -} -var Message_Humour_value = map[string]int32{ - "UNKNOWN": 0, - "PUNS": 1, - "SLAPSTICK": 2, - "BILL_BAILEY": 3, -} - -func (x Message_Humour) String() string { - return proto.EnumName(Message_Humour_name, int32(x)) -} - -type Message struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` - HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm" json:"height_in_cm,omitempty"` - Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` - ResultCount int64 `protobuf:"varint,7,opt,name=result_count" json:"result_count,omitempty"` - TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman" json:"true_scotsman,omitempty"` - Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"` - Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"` - Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"` - Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field" json:"proto2_field,omitempty"` - Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` -} - -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} - -func (m *Message) GetNested() *Nested { - if m != nil { - return m.Nested - } - return nil -} - -func (m *Message) GetTerrain() map[string]*Nested { - if m != nil { - return m.Terrain - } - return nil -} - -func (m *Message) GetProto2Field() *testdata.SubDefaults { - if m != nil { - return m.Proto2Field - } - return nil -} - -func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults { - if m != nil { - return m.Proto2Value - } - return nil -} - -type Nested struct { - Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"` -} - -func (m *Nested) Reset() { *m = Nested{} } -func (m *Nested) String() string { return proto.CompactTextString(m) } -func (*Nested) ProtoMessage() {} - -type MessageWithMap struct { - ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } -func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } -func (*MessageWithMap) ProtoMessage() {} - -func (m *MessageWithMap) GetByteMapping() map[bool][]byte { - if m != nil { - return 
m.ByteMapping - } - return nil -} - -func init() { - proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value) -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto deleted file mode 100644 index e2311d929..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto +++ /dev/null @@ -1,68 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -import "testdata/test.proto"; - -package proto3_proto; - -message Message { - enum Humour { - UNKNOWN = 0; - PUNS = 1; - SLAPSTICK = 2; - BILL_BAILEY = 3; - } - - string name = 1; - Humour hilarity = 2; - uint32 height_in_cm = 3; - bytes data = 4; - int64 result_count = 7; - bool true_scotsman = 8; - float score = 9; - - repeated uint64 key = 5; - Nested nested = 6; - - map<string, Nested> terrain = 10; - testdata.SubDefaults proto2_field = 11; - map<string, testdata.SubDefaults> proto2_value = 13; -} - -message Nested { - string bunny = 1; -} - -message MessageWithMap { - map<bool, bytes> byte_mapping = 1; -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go deleted file mode 100644 index 462f8055c..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go +++ /dev/null @@ -1,125 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2014 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "testing" - - "github.com/golang/protobuf/proto" - pb "github.com/golang/protobuf/proto/proto3_proto" - tpb "github.com/golang/protobuf/proto/testdata" -) - -func TestProto3ZeroValues(t *testing.T) { - tests := []struct { - desc string - m proto.Message - }{ - {"zero message", &pb.Message{}}, - {"empty bytes field", &pb.Message{Data: []byte{}}}, - } - for _, test := range tests { - b, err := proto.Marshal(test.m) - if err != nil { - t.Errorf("%s: proto.Marshal: %v", test.desc, err) - continue - } - if len(b) > 0 { - t.Errorf("%s: Encoding is non-empty: %q", test.desc, b) - } - } -} - -func TestRoundTripProto3(t *testing.T) { - m := &pb.Message{ - Name: "David", // (2 | 1<<3): 0x0a 0x05 "David" - Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01 - HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01 - Data: []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto" - ResultCount: 47, // (0 | 7<<3): 0x38 0x2f - TrueScotsman: true, // (0 | 8<<3): 0x40 0x01 - Score: 8.1, // (5 | 9<<3): 0x4d <8.1> - - Key: []uint64{1, 0xdeadbeef}, - Nested: &pb.Nested{ - Bunny: "Monty", - }, - } - t.Logf(" m: %v", m) - - b, err := proto.Marshal(m) - if err != nil { - t.Fatalf("proto.Marshal: %v", err) - } - t.Logf(" b: %q", b) - - m2 := new(pb.Message) - if err := proto.Unmarshal(b, m2); err != nil { - t.Fatalf("proto.Unmarshal: %v", err) - } - t.Logf("m2: %v", m2) - - if !proto.Equal(m, m2) { - t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2) - } -} - -func TestProto3SetDefaults(t *testing.T) { - in := &pb.Message{ - Terrain: map[string]*pb.Nested{ - "meadow": new(pb.Nested), - }, - Proto2Field: new(tpb.SubDefaults), - Proto2Value: map[string]*tpb.SubDefaults{ - "badlands": new(tpb.SubDefaults), - }, - } - - got := proto.Clone(in).(*pb.Message) - proto.SetDefaults(got) - - // There are no defaults in proto3. Everything should be the zero value, but - // we need to remember to set defaults for nested proto2 messages. 
- want := &pb.Message{ - Terrain: map[string]*pb.Nested{ - "meadow": new(pb.Nested), - }, - Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)}, - Proto2Value: map[string]*tpb.SubDefaults{ - "badlands": &tpb.SubDefaults{N: proto.Int64(7)}, - }, - } - - if !proto.Equal(got, want) { - t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want) - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go deleted file mode 100644 index a2729c39a..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "testing" -) - -// This is a separate file and package from size_test.go because that one uses -// generated messages and thus may not be in package proto without having a circular -// dependency, whereas this file tests unexported details of size.go. - -func TestVarintSize(t *testing.T) { - // Check the edge cases carefully. - testCases := []struct { - n uint64 - size int - }{ - {0, 1}, - {1, 1}, - {127, 1}, - {128, 2}, - {16383, 2}, - {16384, 3}, - {1<<63 - 1, 9}, - {1 << 63, 10}, - } - for _, tc := range testCases { - size := sizeVarint(tc.n) - if size != tc.size { - t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size) - } - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go deleted file mode 100644 index db5614fd1..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. 
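// Editorial sketch of why the edge cases in TestVarintSize above hold: a
// varint stores 7 payload bits per byte, so sizeVarint(n) is the number of
// 7-bit groups needed, with a minimum of 1. A plausible implementation
// matching the test table:
//
//	func sizeVarint(x uint64) (n int) {
//		for {
//			n++
//			x >>= 7
//			if x == 0 {
//				return n
//			}
//		}
//	}
//
// 16383 = 2^14-1 fits in two 7-bit groups; 16384 = 2^14 needs a third, and
// 1<<63 needs all ten bytes.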
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "log" - "strings" - "testing" - - . "github.com/golang/protobuf/proto" - proto3pb "github.com/golang/protobuf/proto/proto3_proto" - pb "github.com/golang/protobuf/proto/testdata" -) - -var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} - -// messageWithExtension2 is in equal_test.go. -var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)} - -func init() { - if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil { - log.Panicf("SetExtension: %v", err) - } - if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil { - log.Panicf("SetExtension: %v", err) - } - - // Force messageWithExtension3 to have the extension encoded. - Marshal(messageWithExtension3) - -} - -var SizeTests = []struct { - desc string - pb Message -}{ - {"empty", &pb.OtherMessage{}}, - // Basic types. - {"bool", &pb.Defaults{F_Bool: Bool(true)}}, - {"int32", &pb.Defaults{F_Int32: Int32(12)}}, - {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}}, - {"small int64", &pb.Defaults{F_Int64: Int64(1)}}, - {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}}, - {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}}, - {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}}, - {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}}, - {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}}, - {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}}, - {"float", &pb.Defaults{F_Float: Float32(12.6)}}, - {"double", &pb.Defaults{F_Double: Float64(13.9)}}, - {"string", &pb.Defaults{F_String: String("niles")}}, - {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}}, - {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}}, - {"sint32", &pb.Defaults{F_Sint32: Int32(65)}}, - {"sint64", &pb.Defaults{F_Sint64: Int64(67)}}, - {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}}, - // Repeated. 
- {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}}, - {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}}, - {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}}, - {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}}, - {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}}, - {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{ - // Need enough large numbers to verify that the header is counting the number of bytes - // for the field, not the number of elements. - 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, - 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, - }}}, - {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}}, - {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}}, - // Nested. - {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}}, - {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}}, - // Other things. - {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}}, - {"extension (unencoded)", messageWithExtension1}, - {"extension (encoded)", messageWithExtension3}, - // proto3 message - {"proto3 empty", &proto3pb.Message{}}, - {"proto3 bool", &proto3pb.Message{TrueScotsman: true}}, - {"proto3 int64", &proto3pb.Message{ResultCount: 1}}, - {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}}, - {"proto3 float", &proto3pb.Message{Score: 12.6}}, - {"proto3 string", &proto3pb.Message{Name: "Snezana"}}, - {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}}, - {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}}, - {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, - {"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}}, - - {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}}, - {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}}, - {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}}, - {"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}}, - - {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}}, - {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}}, - {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}}, -} - -func TestSize(t *testing.T) { - for _, tc := range SizeTests { - size := Size(tc.pb) - b, err := Marshal(tc.pb) - if err != nil { - t.Errorf("%v: Marshal failed: %v", tc.desc, err) - continue - } - if size != len(b) { - t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b)) - t.Logf("%v: bytes: %#v", tc.desc, b) - } - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile deleted file mode 100644 index fc288628a..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile +++ /dev/null @@ -1,50 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. 
All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -include ../../Make.protobuf - -all: regenerate - -regenerate: - rm -f test.pb.go - make test.pb.go - -# The following rules are just aids to development. Not needed for typical testing. - -diff: regenerate - git diff test.pb.go - -restore: - cp test.pb.go.golden test.pb.go - -preserve: - cp test.pb.go test.pb.go.golden diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go deleted file mode 100644 index 7172d0e96..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Verify that the compiler output for test.proto is unchanged. - -package testdata - -import ( - "crypto/sha1" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "testing" -) - -// sum returns in string form (for easy comparison) the SHA-1 hash of the named file. -func sum(t *testing.T, name string) string { - data, err := ioutil.ReadFile(name) - if err != nil { - t.Fatal(err) - } - t.Logf("sum(%q): length is %d", name, len(data)) - hash := sha1.New() - _, err = hash.Write(data) - if err != nil { - t.Fatal(err) - } - return fmt.Sprintf("% x", hash.Sum(nil)) -} - -func run(t *testing.T, name string, args ...string) { - cmd := exec.Command(name, args...) - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err := cmd.Run() - if err != nil { - t.Fatal(err) - } -} - -func TestGolden(t *testing.T) { - // Compute the original checksum. - goldenSum := sum(t, "test.pb.go") - // Run the proto compiler. - run(t, "protoc", "--go_out="+os.TempDir(), "test.proto") - newFile := filepath.Join(os.TempDir(), "test.pb.go") - defer os.Remove(newFile) - // Compute the new checksum. - newSum := sum(t, newFile) - // Verify - if newSum != goldenSum { - run(t, "diff", "-u", "test.pb.go", newFile) - t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go") - } -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go deleted file mode 100644 index 13674a449..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go +++ /dev/null @@ -1,2746 +0,0 @@ -// Code generated by protoc-gen-go. -// source: test.proto -// DO NOT EDIT! - -/* -Package testdata is a generated protocol buffer package. - -It is generated from these files: - test.proto - -It has these top-level messages: - GoEnum - GoTestField - GoTest - GoSkipTest - NonPackedTest - PackedTest - MaxTag - OldMessage - NewMessage - InnerMessage - OtherMessage - MyMessage - Ext - DefaultsMessage - MyMessageSet - Empty - MessageList - Strings - Defaults - SubDefaults - RepeatedEnum - MoreRepeated - GroupOld - GroupNew - FloatingPoint - MessageWithMap -*/ -package testdata - -import proto "github.com/golang/protobuf/proto" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = math.Inf - -type FOO int32 - -const ( - FOO_FOO1 FOO = 1 -) - -var FOO_name = map[int32]string{ - 1: "FOO1", -} -var FOO_value = map[string]int32{ - "FOO1": 1, -} - -func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p -} -func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) -} -func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") - if err != nil { - return err - } - *x = FOO(value) - return nil -} - -// An enum, for completeness. 
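// Editorial note (sketch, not generated output): every generated enum in
// this file follows the same pattern as FOO above -- an int32 type with
// name/value maps, an Enum() constructor returning a pointer, String() via
// proto.EnumName, and UnmarshalJSON via proto.UnmarshalJSONEnum. Typical use:
//
//	m := &GoEnum{Foo: FOO_FOO1.Enum()} // Enum() yields the *FOO a proto2 field needs
//	fmt.Println(m.GetFoo())            // prints "FOO1" via the String method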
-type GoTest_KIND int32 - -const ( - GoTest_VOID GoTest_KIND = 0 - // Basic types - GoTest_BOOL GoTest_KIND = 1 - GoTest_BYTES GoTest_KIND = 2 - GoTest_FINGERPRINT GoTest_KIND = 3 - GoTest_FLOAT GoTest_KIND = 4 - GoTest_INT GoTest_KIND = 5 - GoTest_STRING GoTest_KIND = 6 - GoTest_TIME GoTest_KIND = 7 - // Groupings - GoTest_TUPLE GoTest_KIND = 8 - GoTest_ARRAY GoTest_KIND = 9 - GoTest_MAP GoTest_KIND = 10 - // Table types - GoTest_TABLE GoTest_KIND = 11 - // Functions - GoTest_FUNCTION GoTest_KIND = 12 -) - -var GoTest_KIND_name = map[int32]string{ - 0: "VOID", - 1: "BOOL", - 2: "BYTES", - 3: "FINGERPRINT", - 4: "FLOAT", - 5: "INT", - 6: "STRING", - 7: "TIME", - 8: "TUPLE", - 9: "ARRAY", - 10: "MAP", - 11: "TABLE", - 12: "FUNCTION", -} -var GoTest_KIND_value = map[string]int32{ - "VOID": 0, - "BOOL": 1, - "BYTES": 2, - "FINGERPRINT": 3, - "FLOAT": 4, - "INT": 5, - "STRING": 6, - "TIME": 7, - "TUPLE": 8, - "ARRAY": 9, - "MAP": 10, - "TABLE": 11, - "FUNCTION": 12, -} - -func (x GoTest_KIND) Enum() *GoTest_KIND { - p := new(GoTest_KIND) - *p = x - return p -} -func (x GoTest_KIND) String() string { - return proto.EnumName(GoTest_KIND_name, int32(x)) -} -func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") - if err != nil { - return err - } - *x = GoTest_KIND(value) - return nil -} - -type MyMessage_Color int32 - -const ( - MyMessage_RED MyMessage_Color = 0 - MyMessage_GREEN MyMessage_Color = 1 - MyMessage_BLUE MyMessage_Color = 2 -) - -var MyMessage_Color_name = map[int32]string{ - 0: "RED", - 1: "GREEN", - 2: "BLUE", -} -var MyMessage_Color_value = map[string]int32{ - "RED": 0, - "GREEN": 1, - "BLUE": 2, -} - -func (x MyMessage_Color) Enum() *MyMessage_Color { - p := new(MyMessage_Color) - *p = x - return p -} -func (x MyMessage_Color) String() string { - return proto.EnumName(MyMessage_Color_name, int32(x)) -} -func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") - if err != nil { - return err - } - *x = MyMessage_Color(value) - return nil -} - -type DefaultsMessage_DefaultsEnum int32 - -const ( - DefaultsMessage_ZERO DefaultsMessage_DefaultsEnum = 0 - DefaultsMessage_ONE DefaultsMessage_DefaultsEnum = 1 - DefaultsMessage_TWO DefaultsMessage_DefaultsEnum = 2 -) - -var DefaultsMessage_DefaultsEnum_name = map[int32]string{ - 0: "ZERO", - 1: "ONE", - 2: "TWO", -} -var DefaultsMessage_DefaultsEnum_value = map[string]int32{ - "ZERO": 0, - "ONE": 1, - "TWO": 2, -} - -func (x DefaultsMessage_DefaultsEnum) Enum() *DefaultsMessage_DefaultsEnum { - p := new(DefaultsMessage_DefaultsEnum) - *p = x - return p -} -func (x DefaultsMessage_DefaultsEnum) String() string { - return proto.EnumName(DefaultsMessage_DefaultsEnum_name, int32(x)) -} -func (x *DefaultsMessage_DefaultsEnum) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(DefaultsMessage_DefaultsEnum_value, data, "DefaultsMessage_DefaultsEnum") - if err != nil { - return err - } - *x = DefaultsMessage_DefaultsEnum(value) - return nil -} - -type Defaults_Color int32 - -const ( - Defaults_RED Defaults_Color = 0 - Defaults_GREEN Defaults_Color = 1 - Defaults_BLUE Defaults_Color = 2 -) - -var Defaults_Color_name = map[int32]string{ - 0: "RED", - 1: "GREEN", - 2: "BLUE", -} -var Defaults_Color_value = map[string]int32{ - "RED": 0, - "GREEN": 1, - "BLUE": 2, -} - -func (x Defaults_Color) Enum() *Defaults_Color { - p := new(Defaults_Color) - *p = x - 
return p -} -func (x Defaults_Color) String() string { - return proto.EnumName(Defaults_Color_name, int32(x)) -} -func (x *Defaults_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") - if err != nil { - return err - } - *x = Defaults_Color(value) - return nil -} - -type RepeatedEnum_Color int32 - -const ( - RepeatedEnum_RED RepeatedEnum_Color = 1 -) - -var RepeatedEnum_Color_name = map[int32]string{ - 1: "RED", -} -var RepeatedEnum_Color_value = map[string]int32{ - "RED": 1, -} - -func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color { - p := new(RepeatedEnum_Color) - *p = x - return p -} -func (x RepeatedEnum_Color) String() string { - return proto.EnumName(RepeatedEnum_Color_name, int32(x)) -} -func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") - if err != nil { - return err - } - *x = RepeatedEnum_Color(value) - return nil -} - -type GoEnum struct { - Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoEnum) Reset() { *m = GoEnum{} } -func (m *GoEnum) String() string { return proto.CompactTextString(m) } -func (*GoEnum) ProtoMessage() {} - -func (m *GoEnum) GetFoo() FOO { - if m != nil && m.Foo != nil { - return *m.Foo - } - return FOO_FOO1 -} - -type GoTestField struct { - Label *string `protobuf:"bytes,1,req" json:"Label,omitempty"` - Type *string `protobuf:"bytes,2,req" json:"Type,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTestField) Reset() { *m = GoTestField{} } -func (m *GoTestField) String() string { return proto.CompactTextString(m) } -func (*GoTestField) ProtoMessage() {} - -func (m *GoTestField) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" -} - -func (m *GoTestField) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -type GoTest struct { - // Some typical parameters - Kind *GoTest_KIND `protobuf:"varint,1,req,enum=testdata.GoTest_KIND" json:"Kind,omitempty"` - Table *string `protobuf:"bytes,2,opt" json:"Table,omitempty"` - Param *int32 `protobuf:"varint,3,opt" json:"Param,omitempty"` - // Required, repeated and optional foreign fields. 
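// (Editorial sketch, not in the generated file.) The three fields below
// exercise the proto2 field classes against the same message type: ",req"
// makes Marshal fail with a required-field error while the field is nil,
// ",rep" maps to a slice, and ",opt" is an ordinary nil-able pointer:
//
//	g := &GoTest{}
//	g.RequiredField = &GoTestField{Label: proto.String("l"), Type: proto.String("t")}
//	g.RepeatedField = append(g.RepeatedField, g.RequiredField)
//	g.OptionalField = nil // legal: optional fields may stay unset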
- RequiredField *GoTestField `protobuf:"bytes,4,req" json:"RequiredField,omitempty"` - RepeatedField []*GoTestField `protobuf:"bytes,5,rep" json:"RepeatedField,omitempty"` - OptionalField *GoTestField `protobuf:"bytes,6,opt" json:"OptionalField,omitempty"` - // Required fields of all basic types - F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required" json:"F_Bool_required,omitempty"` - F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required" json:"F_Int32_required,omitempty"` - F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required" json:"F_Int64_required,omitempty"` - F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required" json:"F_Fixed32_required,omitempty"` - F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required" json:"F_Fixed64_required,omitempty"` - F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required" json:"F_Uint32_required,omitempty"` - F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required" json:"F_Uint64_required,omitempty"` - F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required" json:"F_Float_required,omitempty"` - F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required" json:"F_Double_required,omitempty"` - F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required" json:"F_String_required,omitempty"` - F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required" json:"F_Bytes_required,omitempty"` - F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required" json:"F_Sint32_required,omitempty"` - F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required" json:"F_Sint64_required,omitempty"` - // Repeated fields of all basic types - F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated" json:"F_Bool_repeated,omitempty"` - F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated" json:"F_Int32_repeated,omitempty"` - F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated" json:"F_Int64_repeated,omitempty"` - F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated" json:"F_Fixed32_repeated,omitempty"` - F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated" json:"F_Fixed64_repeated,omitempty"` - F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated" json:"F_Uint32_repeated,omitempty"` - F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated" json:"F_Uint64_repeated,omitempty"` - F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated" json:"F_Float_repeated,omitempty"` - F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated" json:"F_Double_repeated,omitempty"` - F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated" json:"F_String_repeated,omitempty"` - F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated" json:"F_Bytes_repeated,omitempty"` - F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated" json:"F_Sint32_repeated,omitempty"` - F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated" json:"F_Sint64_repeated,omitempty"` - // Optional fields of all basic types - F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional" json:"F_Bool_optional,omitempty"` - F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional" json:"F_Int32_optional,omitempty"` - F_Int64Optional *int64 
`protobuf:"varint,32,opt,name=F_Int64_optional" json:"F_Int64_optional,omitempty"` - F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional" json:"F_Fixed32_optional,omitempty"` - F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional" json:"F_Fixed64_optional,omitempty"` - F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional" json:"F_Uint32_optional,omitempty"` - F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional" json:"F_Uint64_optional,omitempty"` - F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional" json:"F_Float_optional,omitempty"` - F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional" json:"F_Double_optional,omitempty"` - F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional" json:"F_String_optional,omitempty"` - F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional" json:"F_Bytes_optional,omitempty"` - F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional" json:"F_Sint32_optional,omitempty"` - F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional" json:"F_Sint64_optional,omitempty"` - // Default-valued fields of all basic types - F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,def=1" json:"F_Bool_defaulted,omitempty"` - F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,def=32" json:"F_Int32_defaulted,omitempty"` - F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,def=64" json:"F_Int64_defaulted,omitempty"` - F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` - F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` - F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` - F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` - F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,def=314159" json:"F_Float_defaulted,omitempty"` - F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,def=271828" json:"F_Double_defaulted,omitempty"` - F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` - F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` - F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` - F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` - // Packed repeated fields (no string or bytes). 
- F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed" json:"F_Bool_repeated_packed,omitempty"` - F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed" json:"F_Int32_repeated_packed,omitempty"` - F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed" json:"F_Int64_repeated_packed,omitempty"` - F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed" json:"F_Fixed32_repeated_packed,omitempty"` - F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed" json:"F_Fixed64_repeated_packed,omitempty"` - F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed" json:"F_Uint32_repeated_packed,omitempty"` - F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed" json:"F_Uint64_repeated_packed,omitempty"` - F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed" json:"F_Float_repeated_packed,omitempty"` - F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed" json:"F_Double_repeated_packed,omitempty"` - F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed" json:"F_Sint32_repeated_packed,omitempty"` - F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed" json:"F_Sint64_repeated_packed,omitempty"` - Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup" json:"requiredgroup,omitempty"` - Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup" json:"repeatedgroup,omitempty"` - Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest) Reset() { *m = GoTest{} } -func (m *GoTest) String() string { return proto.CompactTextString(m) } -func (*GoTest) ProtoMessage() {} - -const Default_GoTest_F_BoolDefaulted bool = true -const Default_GoTest_F_Int32Defaulted int32 = 32 -const Default_GoTest_F_Int64Defaulted int64 = 64 -const Default_GoTest_F_Fixed32Defaulted uint32 = 320 -const Default_GoTest_F_Fixed64Defaulted uint64 = 640 -const Default_GoTest_F_Uint32Defaulted uint32 = 3200 -const Default_GoTest_F_Uint64Defaulted uint64 = 6400 -const Default_GoTest_F_FloatDefaulted float32 = 314159 -const Default_GoTest_F_DoubleDefaulted float64 = 271828 -const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" - -var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") - -const Default_GoTest_F_Sint32Defaulted int32 = -32 -const Default_GoTest_F_Sint64Defaulted int64 = -64 - -func (m *GoTest) GetKind() GoTest_KIND { - if m != nil && m.Kind != nil { - return *m.Kind - } - return GoTest_VOID -} - -func (m *GoTest) GetTable() string { - if m != nil && m.Table != nil { - return *m.Table - } - return "" -} - -func (m *GoTest) GetParam() int32 { - if m != nil && m.Param != nil { - return *m.Param - } - return 0 -} - -func (m *GoTest) GetRequiredField() *GoTestField { - if m != nil { - return m.RequiredField - } - return nil -} - -func (m *GoTest) GetRepeatedField() []*GoTestField { - if m != nil { - return m.RepeatedField - } - return nil -} - -func (m *GoTest) GetOptionalField() *GoTestField { - if m != nil { - return m.OptionalField - } - return nil -} - -func (m *GoTest) GetF_BoolRequired() bool { - if m != nil && 
m.F_BoolRequired != nil { - return *m.F_BoolRequired - } - return false -} - -func (m *GoTest) GetF_Int32Required() int32 { - if m != nil && m.F_Int32Required != nil { - return *m.F_Int32Required - } - return 0 -} - -func (m *GoTest) GetF_Int64Required() int64 { - if m != nil && m.F_Int64Required != nil { - return *m.F_Int64Required - } - return 0 -} - -func (m *GoTest) GetF_Fixed32Required() uint32 { - if m != nil && m.F_Fixed32Required != nil { - return *m.F_Fixed32Required - } - return 0 -} - -func (m *GoTest) GetF_Fixed64Required() uint64 { - if m != nil && m.F_Fixed64Required != nil { - return *m.F_Fixed64Required - } - return 0 -} - -func (m *GoTest) GetF_Uint32Required() uint32 { - if m != nil && m.F_Uint32Required != nil { - return *m.F_Uint32Required - } - return 0 -} - -func (m *GoTest) GetF_Uint64Required() uint64 { - if m != nil && m.F_Uint64Required != nil { - return *m.F_Uint64Required - } - return 0 -} - -func (m *GoTest) GetF_FloatRequired() float32 { - if m != nil && m.F_FloatRequired != nil { - return *m.F_FloatRequired - } - return 0 -} - -func (m *GoTest) GetF_DoubleRequired() float64 { - if m != nil && m.F_DoubleRequired != nil { - return *m.F_DoubleRequired - } - return 0 -} - -func (m *GoTest) GetF_StringRequired() string { - if m != nil && m.F_StringRequired != nil { - return *m.F_StringRequired - } - return "" -} - -func (m *GoTest) GetF_BytesRequired() []byte { - if m != nil { - return m.F_BytesRequired - } - return nil -} - -func (m *GoTest) GetF_Sint32Required() int32 { - if m != nil && m.F_Sint32Required != nil { - return *m.F_Sint32Required - } - return 0 -} - -func (m *GoTest) GetF_Sint64Required() int64 { - if m != nil && m.F_Sint64Required != nil { - return *m.F_Sint64Required - } - return 0 -} - -func (m *GoTest) GetF_BoolRepeated() []bool { - if m != nil { - return m.F_BoolRepeated - } - return nil -} - -func (m *GoTest) GetF_Int32Repeated() []int32 { - if m != nil { - return m.F_Int32Repeated - } - return nil -} - -func (m *GoTest) GetF_Int64Repeated() []int64 { - if m != nil { - return m.F_Int64Repeated - } - return nil -} - -func (m *GoTest) GetF_Fixed32Repeated() []uint32 { - if m != nil { - return m.F_Fixed32Repeated - } - return nil -} - -func (m *GoTest) GetF_Fixed64Repeated() []uint64 { - if m != nil { - return m.F_Fixed64Repeated - } - return nil -} - -func (m *GoTest) GetF_Uint32Repeated() []uint32 { - if m != nil { - return m.F_Uint32Repeated - } - return nil -} - -func (m *GoTest) GetF_Uint64Repeated() []uint64 { - if m != nil { - return m.F_Uint64Repeated - } - return nil -} - -func (m *GoTest) GetF_FloatRepeated() []float32 { - if m != nil { - return m.F_FloatRepeated - } - return nil -} - -func (m *GoTest) GetF_DoubleRepeated() []float64 { - if m != nil { - return m.F_DoubleRepeated - } - return nil -} - -func (m *GoTest) GetF_StringRepeated() []string { - if m != nil { - return m.F_StringRepeated - } - return nil -} - -func (m *GoTest) GetF_BytesRepeated() [][]byte { - if m != nil { - return m.F_BytesRepeated - } - return nil -} - -func (m *GoTest) GetF_Sint32Repeated() []int32 { - if m != nil { - return m.F_Sint32Repeated - } - return nil -} - -func (m *GoTest) GetF_Sint64Repeated() []int64 { - if m != nil { - return m.F_Sint64Repeated - } - return nil -} - -func (m *GoTest) GetF_BoolOptional() bool { - if m != nil && m.F_BoolOptional != nil { - return *m.F_BoolOptional - } - return false -} - -func (m *GoTest) GetF_Int32Optional() int32 { - if m != nil && m.F_Int32Optional != nil { - return *m.F_Int32Optional - } - return 0 -} - -func (m 
*GoTest) GetF_Int64Optional() int64 { - if m != nil && m.F_Int64Optional != nil { - return *m.F_Int64Optional - } - return 0 -} - -func (m *GoTest) GetF_Fixed32Optional() uint32 { - if m != nil && m.F_Fixed32Optional != nil { - return *m.F_Fixed32Optional - } - return 0 -} - -func (m *GoTest) GetF_Fixed64Optional() uint64 { - if m != nil && m.F_Fixed64Optional != nil { - return *m.F_Fixed64Optional - } - return 0 -} - -func (m *GoTest) GetF_Uint32Optional() uint32 { - if m != nil && m.F_Uint32Optional != nil { - return *m.F_Uint32Optional - } - return 0 -} - -func (m *GoTest) GetF_Uint64Optional() uint64 { - if m != nil && m.F_Uint64Optional != nil { - return *m.F_Uint64Optional - } - return 0 -} - -func (m *GoTest) GetF_FloatOptional() float32 { - if m != nil && m.F_FloatOptional != nil { - return *m.F_FloatOptional - } - return 0 -} - -func (m *GoTest) GetF_DoubleOptional() float64 { - if m != nil && m.F_DoubleOptional != nil { - return *m.F_DoubleOptional - } - return 0 -} - -func (m *GoTest) GetF_StringOptional() string { - if m != nil && m.F_StringOptional != nil { - return *m.F_StringOptional - } - return "" -} - -func (m *GoTest) GetF_BytesOptional() []byte { - if m != nil { - return m.F_BytesOptional - } - return nil -} - -func (m *GoTest) GetF_Sint32Optional() int32 { - if m != nil && m.F_Sint32Optional != nil { - return *m.F_Sint32Optional - } - return 0 -} - -func (m *GoTest) GetF_Sint64Optional() int64 { - if m != nil && m.F_Sint64Optional != nil { - return *m.F_Sint64Optional - } - return 0 -} - -func (m *GoTest) GetF_BoolDefaulted() bool { - if m != nil && m.F_BoolDefaulted != nil { - return *m.F_BoolDefaulted - } - return Default_GoTest_F_BoolDefaulted -} - -func (m *GoTest) GetF_Int32Defaulted() int32 { - if m != nil && m.F_Int32Defaulted != nil { - return *m.F_Int32Defaulted - } - return Default_GoTest_F_Int32Defaulted -} - -func (m *GoTest) GetF_Int64Defaulted() int64 { - if m != nil && m.F_Int64Defaulted != nil { - return *m.F_Int64Defaulted - } - return Default_GoTest_F_Int64Defaulted -} - -func (m *GoTest) GetF_Fixed32Defaulted() uint32 { - if m != nil && m.F_Fixed32Defaulted != nil { - return *m.F_Fixed32Defaulted - } - return Default_GoTest_F_Fixed32Defaulted -} - -func (m *GoTest) GetF_Fixed64Defaulted() uint64 { - if m != nil && m.F_Fixed64Defaulted != nil { - return *m.F_Fixed64Defaulted - } - return Default_GoTest_F_Fixed64Defaulted -} - -func (m *GoTest) GetF_Uint32Defaulted() uint32 { - if m != nil && m.F_Uint32Defaulted != nil { - return *m.F_Uint32Defaulted - } - return Default_GoTest_F_Uint32Defaulted -} - -func (m *GoTest) GetF_Uint64Defaulted() uint64 { - if m != nil && m.F_Uint64Defaulted != nil { - return *m.F_Uint64Defaulted - } - return Default_GoTest_F_Uint64Defaulted -} - -func (m *GoTest) GetF_FloatDefaulted() float32 { - if m != nil && m.F_FloatDefaulted != nil { - return *m.F_FloatDefaulted - } - return Default_GoTest_F_FloatDefaulted -} - -func (m *GoTest) GetF_DoubleDefaulted() float64 { - if m != nil && m.F_DoubleDefaulted != nil { - return *m.F_DoubleDefaulted - } - return Default_GoTest_F_DoubleDefaulted -} - -func (m *GoTest) GetF_StringDefaulted() string { - if m != nil && m.F_StringDefaulted != nil { - return *m.F_StringDefaulted - } - return Default_GoTest_F_StringDefaulted -} - -func (m *GoTest) GetF_BytesDefaulted() []byte { - if m != nil && m.F_BytesDefaulted != nil { - return m.F_BytesDefaulted - } - return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) 
-} - -func (m *GoTest) GetF_Sint32Defaulted() int32 { - if m != nil && m.F_Sint32Defaulted != nil { - return *m.F_Sint32Defaulted - } - return Default_GoTest_F_Sint32Defaulted -} - -func (m *GoTest) GetF_Sint64Defaulted() int64 { - if m != nil && m.F_Sint64Defaulted != nil { - return *m.F_Sint64Defaulted - } - return Default_GoTest_F_Sint64Defaulted -} - -func (m *GoTest) GetF_BoolRepeatedPacked() []bool { - if m != nil { - return m.F_BoolRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { - if m != nil { - return m.F_Int32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { - if m != nil { - return m.F_Int64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { - if m != nil { - return m.F_Fixed32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { - if m != nil { - return m.F_Fixed64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { - if m != nil { - return m.F_Uint32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { - if m != nil { - return m.F_Uint64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_FloatRepeatedPacked() []float32 { - if m != nil { - return m.F_FloatRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { - if m != nil { - return m.F_DoubleRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { - if m != nil { - return m.F_Sint32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { - if m != nil { - return m.F_Sint64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { - if m != nil { - return m.Requiredgroup - } - return nil -} - -func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { - if m != nil { - return m.Repeatedgroup - } - return nil -} - -func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil -} - -// Required, repeated, and optional groups. 
-type GoTest_RequiredGroup struct { - RequiredField *string `protobuf:"bytes,71,req" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } -func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_RequiredGroup) ProtoMessage() {} - -func (m *GoTest_RequiredGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -type GoTest_RepeatedGroup struct { - RequiredField *string `protobuf:"bytes,81,req" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } -func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_RepeatedGroup) ProtoMessage() {} - -func (m *GoTest_RepeatedGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -type GoTest_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,91,req" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } -func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_OptionalGroup) ProtoMessage() {} - -func (m *GoTest_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -// For testing skipping of unrecognized fields. -// Numbers are all big, larger than tag numbers in GoTestField, -// the message used in the corresponding test. -type GoSkipTest struct { - SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32" json:"skip_int32,omitempty"` - SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32" json:"skip_fixed32,omitempty"` - SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64" json:"skip_fixed64,omitempty"` - SkipString *string `protobuf:"bytes,14,req,name=skip_string" json:"skip_string,omitempty"` - Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup" json:"skipgroup,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } -func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } -func (*GoSkipTest) ProtoMessage() {} - -func (m *GoSkipTest) GetSkipInt32() int32 { - if m != nil && m.SkipInt32 != nil { - return *m.SkipInt32 - } - return 0 -} - -func (m *GoSkipTest) GetSkipFixed32() uint32 { - if m != nil && m.SkipFixed32 != nil { - return *m.SkipFixed32 - } - return 0 -} - -func (m *GoSkipTest) GetSkipFixed64() uint64 { - if m != nil && m.SkipFixed64 != nil { - return *m.SkipFixed64 - } - return 0 -} - -func (m *GoSkipTest) GetSkipString() string { - if m != nil && m.SkipString != nil { - return *m.SkipString - } - return "" -} - -func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { - if m != nil { - return m.Skipgroup - } - return nil -} - -type GoSkipTest_SkipGroup struct { - GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32" json:"group_int32,omitempty"` - GroupString *string `protobuf:"bytes,17,req,name=group_string" json:"group_string,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } -func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } -func (*GoSkipTest_SkipGroup) ProtoMessage() {} - -func (m 
*GoSkipTest_SkipGroup) GetGroupInt32() int32 { - if m != nil && m.GroupInt32 != nil { - return *m.GroupInt32 - } - return 0 -} - -func (m *GoSkipTest_SkipGroup) GetGroupString() string { - if m != nil && m.GroupString != nil { - return *m.GroupString - } - return "" -} - -// For testing packed/non-packed decoder switching. -// A serialized instance of one should be deserializable as the other. -type NonPackedTest struct { - A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } -func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } -func (*NonPackedTest) ProtoMessage() {} - -func (m *NonPackedTest) GetA() []int32 { - if m != nil { - return m.A - } - return nil -} - -type PackedTest struct { - B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PackedTest) Reset() { *m = PackedTest{} } -func (m *PackedTest) String() string { return proto.CompactTextString(m) } -func (*PackedTest) ProtoMessage() {} - -func (m *PackedTest) GetB() []int32 { - if m != nil { - return m.B - } - return nil -} - -type MaxTag struct { - // Maximum possible tag number. - LastField *string `protobuf:"bytes,536870911,opt,name=last_field" json:"last_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MaxTag) Reset() { *m = MaxTag{} } -func (m *MaxTag) String() string { return proto.CompactTextString(m) } -func (*MaxTag) ProtoMessage() {} - -func (m *MaxTag) GetLastField() string { - if m != nil && m.LastField != nil { - return *m.LastField - } - return "" -} - -type OldMessage struct { - Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` - Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OldMessage) Reset() { *m = OldMessage{} } -func (m *OldMessage) String() string { return proto.CompactTextString(m) } -func (*OldMessage) ProtoMessage() {} - -func (m *OldMessage) GetNested() *OldMessage_Nested { - if m != nil { - return m.Nested - } - return nil -} - -func (m *OldMessage) GetNum() int32 { - if m != nil && m.Num != nil { - return *m.Num - } - return 0 -} - -type OldMessage_Nested struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } -func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } -func (*OldMessage_Nested) ProtoMessage() {} - -func (m *OldMessage_Nested) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -// NewMessage is wire compatible with OldMessage; -// imagine it as a future version. -type NewMessage struct { - Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` - // This is an int32 in OldMessage. 
- Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NewMessage) Reset() { *m = NewMessage{} } -func (m *NewMessage) String() string { return proto.CompactTextString(m) } -func (*NewMessage) ProtoMessage() {} - -func (m *NewMessage) GetNested() *NewMessage_Nested { - if m != nil { - return m.Nested - } - return nil -} - -func (m *NewMessage) GetNum() int64 { - if m != nil && m.Num != nil { - return *m.Num - } - return 0 -} - -type NewMessage_Nested struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - FoodGroup *string `protobuf:"bytes,2,opt,name=food_group" json:"food_group,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } -func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } -func (*NewMessage_Nested) ProtoMessage() {} - -func (m *NewMessage_Nested) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *NewMessage_Nested) GetFoodGroup() string { - if m != nil && m.FoodGroup != nil { - return *m.FoodGroup - } - return "" -} - -type InnerMessage struct { - Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"` - Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` - Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *InnerMessage) Reset() { *m = InnerMessage{} } -func (m *InnerMessage) String() string { return proto.CompactTextString(m) } -func (*InnerMessage) ProtoMessage() {} - -const Default_InnerMessage_Port int32 = 4000 - -func (m *InnerMessage) GetHost() string { - if m != nil && m.Host != nil { - return *m.Host - } - return "" -} - -func (m *InnerMessage) GetPort() int32 { - if m != nil && m.Port != nil { - return *m.Port - } - return Default_InnerMessage_Port -} - -func (m *InnerMessage) GetConnected() bool { - if m != nil && m.Connected != nil { - return *m.Connected - } - return false -} - -type OtherMessage struct { - Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` - Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OtherMessage) Reset() { *m = OtherMessage{} } -func (m *OtherMessage) String() string { return proto.CompactTextString(m) } -func (*OtherMessage) ProtoMessage() {} - -func (m *OtherMessage) GetKey() int64 { - if m != nil && m.Key != nil { - return *m.Key - } - return 0 -} - -func (m *OtherMessage) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *OtherMessage) GetWeight() float32 { - if m != nil && m.Weight != nil { - return *m.Weight - } - return 0 -} - -func (m *OtherMessage) GetInner() *InnerMessage { - if m != nil { - return m.Inner - } - return nil -} - -type MyMessage struct { - Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` - Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` - Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` - Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` - Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" 
json:"others,omitempty"` - RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner" json:"rep_inner,omitempty"` - Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` - Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup" json:"somegroup,omitempty"` - // This field becomes [][]byte in the generated code. - RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes" json:"rep_bytes,omitempty"` - Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessage) Reset() { *m = MyMessage{} } -func (m *MyMessage) String() string { return proto.CompactTextString(m) } -func (*MyMessage) ProtoMessage() {} - -var extRange_MyMessage = []proto.ExtensionRange{ - {100, 536870911}, -} - -func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MyMessage -} -func (m *MyMessage) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} - -func (m *MyMessage) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *MyMessage) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MyMessage) GetQuote() string { - if m != nil && m.Quote != nil { - return *m.Quote - } - return "" -} - -func (m *MyMessage) GetPet() []string { - if m != nil { - return m.Pet - } - return nil -} - -func (m *MyMessage) GetInner() *InnerMessage { - if m != nil { - return m.Inner - } - return nil -} - -func (m *MyMessage) GetOthers() []*OtherMessage { - if m != nil { - return m.Others - } - return nil -} - -func (m *MyMessage) GetRepInner() []*InnerMessage { - if m != nil { - return m.RepInner - } - return nil -} - -func (m *MyMessage) GetBikeshed() MyMessage_Color { - if m != nil && m.Bikeshed != nil { - return *m.Bikeshed - } - return MyMessage_RED -} - -func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { - if m != nil { - return m.Somegroup - } - return nil -} - -func (m *MyMessage) GetRepBytes() [][]byte { - if m != nil { - return m.RepBytes - } - return nil -} - -func (m *MyMessage) GetBigfloat() float64 { - if m != nil && m.Bigfloat != nil { - return *m.Bigfloat - } - return 0 -} - -type MyMessage_SomeGroup struct { - GroupField *int32 `protobuf:"varint,9,opt,name=group_field" json:"group_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } -func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } -func (*MyMessage_SomeGroup) ProtoMessage() {} - -func (m *MyMessage_SomeGroup) GetGroupField() int32 { - if m != nil && m.GroupField != nil { - return *m.GroupField - } - return 0 -} - -type Ext struct { - Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Ext) Reset() { *m = Ext{} } -func (m *Ext) String() string { return proto.CompactTextString(m) } -func (*Ext) ProtoMessage() {} - -func (m *Ext) GetData() string { - if m != nil && m.Data != nil { - return *m.Data - } - return "" -} - -var E_Ext_More = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: (*Ext)(nil), - Field: 103, - Name: "testdata.Ext.more", - Tag: "bytes,103,opt,name=more", -} - -var E_Ext_Text = &proto.ExtensionDesc{ - 
ExtendedType: (*MyMessage)(nil), - ExtensionType: (*string)(nil), - Field: 104, - Name: "testdata.Ext.text", - Tag: "bytes,104,opt,name=text", -} - -var E_Ext_Number = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 105, - Name: "testdata.Ext.number", - Tag: "varint,105,opt,name=number", -} - -type DefaultsMessage struct { - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} } -func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) } -func (*DefaultsMessage) ProtoMessage() {} - -var extRange_DefaultsMessage = []proto.ExtensionRange{ - {100, 536870911}, -} - -func (*DefaultsMessage) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_DefaultsMessage -} -func (m *DefaultsMessage) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} - -type MyMessageSet struct { - XXX_extensions map[int32]proto.Extension `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } -func (m *MyMessageSet) String() string { return proto.CompactTextString(m) } -func (*MyMessageSet) ProtoMessage() {} - -func (m *MyMessageSet) Marshal() ([]byte, error) { - return proto.MarshalMessageSet(m.ExtensionMap()) -} -func (m *MyMessageSet) Unmarshal(buf []byte) error { - return proto.UnmarshalMessageSet(buf, m.ExtensionMap()) -} -func (m *MyMessageSet) MarshalJSON() ([]byte, error) { - return proto.MarshalMessageSetJSON(m.XXX_extensions) -} -func (m *MyMessageSet) UnmarshalJSON(buf []byte) error { - return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions) -} - -// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler -var _ proto.Marshaler = (*MyMessageSet)(nil) -var _ proto.Unmarshaler = (*MyMessageSet)(nil) - -var extRange_MyMessageSet = []proto.ExtensionRange{ - {100, 2147483646}, -} - -func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MyMessageSet -} -func (m *MyMessageSet) ExtensionMap() map[int32]proto.Extension { - if m.XXX_extensions == nil { - m.XXX_extensions = make(map[int32]proto.Extension) - } - return m.XXX_extensions -} - -type Empty struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} - -type MessageList struct { - Message []*MessageList_Message `protobuf:"group,1,rep" json:"message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageList) Reset() { *m = MessageList{} } -func (m *MessageList) String() string { return proto.CompactTextString(m) } -func (*MessageList) ProtoMessage() {} - -func (m *MessageList) GetMessage() []*MessageList_Message { - if m != nil { - return m.Message - } - return nil -} - -type MessageList_Message struct { - Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` - Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } -func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } -func (*MessageList_Message) ProtoMessage() {} - -func (m *MessageList_Message) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m 
*MessageList_Message) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -type Strings struct { - StringField *string `protobuf:"bytes,1,opt,name=string_field" json:"string_field,omitempty"` - BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field" json:"bytes_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Strings) Reset() { *m = Strings{} } -func (m *Strings) String() string { return proto.CompactTextString(m) } -func (*Strings) ProtoMessage() {} - -func (m *Strings) GetStringField() string { - if m != nil && m.StringField != nil { - return *m.StringField - } - return "" -} - -func (m *Strings) GetBytesField() []byte { - if m != nil { - return m.BytesField - } - return nil -} - -type Defaults struct { - // Default-valued fields of all basic types. - // Same as GoTest, but copied here to make testing easier. - F_Bool *bool `protobuf:"varint,1,opt,def=1" json:"F_Bool,omitempty"` - F_Int32 *int32 `protobuf:"varint,2,opt,def=32" json:"F_Int32,omitempty"` - F_Int64 *int64 `protobuf:"varint,3,opt,def=64" json:"F_Int64,omitempty"` - F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,def=320" json:"F_Fixed32,omitempty"` - F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,def=640" json:"F_Fixed64,omitempty"` - F_Uint32 *uint32 `protobuf:"varint,6,opt,def=3200" json:"F_Uint32,omitempty"` - F_Uint64 *uint64 `protobuf:"varint,7,opt,def=6400" json:"F_Uint64,omitempty"` - F_Float *float32 `protobuf:"fixed32,8,opt,def=314159" json:"F_Float,omitempty"` - F_Double *float64 `protobuf:"fixed64,9,opt,def=271828" json:"F_Double,omitempty"` - F_String *string `protobuf:"bytes,10,opt,def=hello, \"world!\"\n" json:"F_String,omitempty"` - F_Bytes []byte `protobuf:"bytes,11,opt,def=Bignose" json:"F_Bytes,omitempty"` - F_Sint32 *int32 `protobuf:"zigzag32,12,opt,def=-32" json:"F_Sint32,omitempty"` - F_Sint64 *int64 `protobuf:"zigzag64,13,opt,def=-64" json:"F_Sint64,omitempty"` - F_Enum *Defaults_Color `protobuf:"varint,14,opt,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` - // More fields with crazy defaults. - F_Pinf *float32 `protobuf:"fixed32,15,opt,def=inf" json:"F_Pinf,omitempty"` - F_Ninf *float32 `protobuf:"fixed32,16,opt,def=-inf" json:"F_Ninf,omitempty"` - F_Nan *float32 `protobuf:"fixed32,17,opt,def=nan" json:"F_Nan,omitempty"` - // Sub-message. - Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` - // Redundant but explicit defaults. 
- StrZero *string `protobuf:"bytes,19,opt,name=str_zero,def=" json:"str_zero,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Defaults) Reset() { *m = Defaults{} } -func (m *Defaults) String() string { return proto.CompactTextString(m) } -func (*Defaults) ProtoMessage() {} - -const Default_Defaults_F_Bool bool = true -const Default_Defaults_F_Int32 int32 = 32 -const Default_Defaults_F_Int64 int64 = 64 -const Default_Defaults_F_Fixed32 uint32 = 320 -const Default_Defaults_F_Fixed64 uint64 = 640 -const Default_Defaults_F_Uint32 uint32 = 3200 -const Default_Defaults_F_Uint64 uint64 = 6400 -const Default_Defaults_F_Float float32 = 314159 -const Default_Defaults_F_Double float64 = 271828 -const Default_Defaults_F_String string = "hello, \"world!\"\n" - -var Default_Defaults_F_Bytes []byte = []byte("Bignose") - -const Default_Defaults_F_Sint32 int32 = -32 -const Default_Defaults_F_Sint64 int64 = -64 -const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN - -var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) -var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) -var Default_Defaults_F_Nan float32 = float32(math.NaN()) - -func (m *Defaults) GetF_Bool() bool { - if m != nil && m.F_Bool != nil { - return *m.F_Bool - } - return Default_Defaults_F_Bool -} - -func (m *Defaults) GetF_Int32() int32 { - if m != nil && m.F_Int32 != nil { - return *m.F_Int32 - } - return Default_Defaults_F_Int32 -} - -func (m *Defaults) GetF_Int64() int64 { - if m != nil && m.F_Int64 != nil { - return *m.F_Int64 - } - return Default_Defaults_F_Int64 -} - -func (m *Defaults) GetF_Fixed32() uint32 { - if m != nil && m.F_Fixed32 != nil { - return *m.F_Fixed32 - } - return Default_Defaults_F_Fixed32 -} - -func (m *Defaults) GetF_Fixed64() uint64 { - if m != nil && m.F_Fixed64 != nil { - return *m.F_Fixed64 - } - return Default_Defaults_F_Fixed64 -} - -func (m *Defaults) GetF_Uint32() uint32 { - if m != nil && m.F_Uint32 != nil { - return *m.F_Uint32 - } - return Default_Defaults_F_Uint32 -} - -func (m *Defaults) GetF_Uint64() uint64 { - if m != nil && m.F_Uint64 != nil { - return *m.F_Uint64 - } - return Default_Defaults_F_Uint64 -} - -func (m *Defaults) GetF_Float() float32 { - if m != nil && m.F_Float != nil { - return *m.F_Float - } - return Default_Defaults_F_Float -} - -func (m *Defaults) GetF_Double() float64 { - if m != nil && m.F_Double != nil { - return *m.F_Double - } - return Default_Defaults_F_Double -} - -func (m *Defaults) GetF_String() string { - if m != nil && m.F_String != nil { - return *m.F_String - } - return Default_Defaults_F_String -} - -func (m *Defaults) GetF_Bytes() []byte { - if m != nil && m.F_Bytes != nil { - return m.F_Bytes - } - return append([]byte(nil), Default_Defaults_F_Bytes...) 
-} - -func (m *Defaults) GetF_Sint32() int32 { - if m != nil && m.F_Sint32 != nil { - return *m.F_Sint32 - } - return Default_Defaults_F_Sint32 -} - -func (m *Defaults) GetF_Sint64() int64 { - if m != nil && m.F_Sint64 != nil { - return *m.F_Sint64 - } - return Default_Defaults_F_Sint64 -} - -func (m *Defaults) GetF_Enum() Defaults_Color { - if m != nil && m.F_Enum != nil { - return *m.F_Enum - } - return Default_Defaults_F_Enum -} - -func (m *Defaults) GetF_Pinf() float32 { - if m != nil && m.F_Pinf != nil { - return *m.F_Pinf - } - return Default_Defaults_F_Pinf -} - -func (m *Defaults) GetF_Ninf() float32 { - if m != nil && m.F_Ninf != nil { - return *m.F_Ninf - } - return Default_Defaults_F_Ninf -} - -func (m *Defaults) GetF_Nan() float32 { - if m != nil && m.F_Nan != nil { - return *m.F_Nan - } - return Default_Defaults_F_Nan -} - -func (m *Defaults) GetSub() *SubDefaults { - if m != nil { - return m.Sub - } - return nil -} - -func (m *Defaults) GetStrZero() string { - if m != nil && m.StrZero != nil { - return *m.StrZero - } - return "" -} - -type SubDefaults struct { - N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SubDefaults) Reset() { *m = SubDefaults{} } -func (m *SubDefaults) String() string { return proto.CompactTextString(m) } -func (*SubDefaults) ProtoMessage() {} - -const Default_SubDefaults_N int64 = 7 - -func (m *SubDefaults) GetN() int64 { - if m != nil && m.N != nil { - return *m.N - } - return Default_SubDefaults_N -} - -type RepeatedEnum struct { - Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } -func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } -func (*RepeatedEnum) ProtoMessage() {} - -func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { - if m != nil { - return m.Color - } - return nil -} - -type MoreRepeated struct { - Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` - BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed" json:"bools_packed,omitempty"` - Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` - IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed" json:"ints_packed,omitempty"` - Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed" json:"int64s_packed,omitempty"` - Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` - Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } -func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } -func (*MoreRepeated) ProtoMessage() {} - -func (m *MoreRepeated) GetBools() []bool { - if m != nil { - return m.Bools - } - return nil -} - -func (m *MoreRepeated) GetBoolsPacked() []bool { - if m != nil { - return m.BoolsPacked - } - return nil -} - -func (m *MoreRepeated) GetInts() []int32 { - if m != nil { - return m.Ints - } - return nil -} - -func (m *MoreRepeated) GetIntsPacked() []int32 { - if m != nil { - return m.IntsPacked - } - return nil -} - -func (m *MoreRepeated) GetInt64SPacked() []int64 { - if m != nil { - return m.Int64SPacked - } - return nil -} - -func (m *MoreRepeated) GetStrings() []string { - if m != nil { - return m.Strings - } - return nil -} - -func (m *MoreRepeated) 
GetFixeds() []uint32 { - if m != nil { - return m.Fixeds - } - return nil -} - -type GroupOld struct { - G *GroupOld_G `protobuf:"group,101,opt" json:"g,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupOld) Reset() { *m = GroupOld{} } -func (m *GroupOld) String() string { return proto.CompactTextString(m) } -func (*GroupOld) ProtoMessage() {} - -func (m *GroupOld) GetG() *GroupOld_G { - if m != nil { - return m.G - } - return nil -} - -type GroupOld_G struct { - X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } -func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } -func (*GroupOld_G) ProtoMessage() {} - -func (m *GroupOld_G) GetX() int32 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -type GroupNew struct { - G *GroupNew_G `protobuf:"group,101,opt" json:"g,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupNew) Reset() { *m = GroupNew{} } -func (m *GroupNew) String() string { return proto.CompactTextString(m) } -func (*GroupNew) ProtoMessage() {} - -func (m *GroupNew) GetG() *GroupNew_G { - if m != nil { - return m.G - } - return nil -} - -type GroupNew_G struct { - X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` - Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } -func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } -func (*GroupNew_G) ProtoMessage() {} - -func (m *GroupNew_G) GetX() int32 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -func (m *GroupNew_G) GetY() int32 { - if m != nil && m.Y != nil { - return *m.Y - } - return 0 -} - -type FloatingPoint struct { - F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } -func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } -func (*FloatingPoint) ProtoMessage() {} - -func (m *FloatingPoint) GetF() float64 { - if m != nil && m.F != nil { - return *m.F - } - return 0 -} - -type MessageWithMap struct { - NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } -func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } -func (*MessageWithMap) ProtoMessage() {} - -func (m *MessageWithMap) GetNameMapping() map[int32]string { - if m != nil { - return m.NameMapping - } - return nil -} - -func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint { - if m != nil { - return m.MsgMapping - } - return nil -} - -func (m *MessageWithMap) GetByteMapping() map[bool][]byte { - if m != nil { - return m.ByteMapping - } - 
return nil -} - -func (m *MessageWithMap) GetStrToStr() map[string]string { - if m != nil { - return m.StrToStr - } - return nil -} - -var E_Greeting = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: ([]string)(nil), - Field: 106, - Name: "testdata.greeting", - Tag: "bytes,106,rep,name=greeting", -} - -var E_NoDefaultDouble = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float64)(nil), - Field: 101, - Name: "testdata.no_default_double", - Tag: "fixed64,101,opt,name=no_default_double", -} - -var E_NoDefaultFloat = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float32)(nil), - Field: 102, - Name: "testdata.no_default_float", - Tag: "fixed32,102,opt,name=no_default_float", -} - -var E_NoDefaultInt32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 103, - Name: "testdata.no_default_int32", - Tag: "varint,103,opt,name=no_default_int32", -} - -var E_NoDefaultInt64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 104, - Name: "testdata.no_default_int64", - Tag: "varint,104,opt,name=no_default_int64", -} - -var E_NoDefaultUint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 105, - Name: "testdata.no_default_uint32", - Tag: "varint,105,opt,name=no_default_uint32", -} - -var E_NoDefaultUint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 106, - Name: "testdata.no_default_uint64", - Tag: "varint,106,opt,name=no_default_uint64", -} - -var E_NoDefaultSint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 107, - Name: "testdata.no_default_sint32", - Tag: "zigzag32,107,opt,name=no_default_sint32", -} - -var E_NoDefaultSint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 108, - Name: "testdata.no_default_sint64", - Tag: "zigzag64,108,opt,name=no_default_sint64", -} - -var E_NoDefaultFixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 109, - Name: "testdata.no_default_fixed32", - Tag: "fixed32,109,opt,name=no_default_fixed32", -} - -var E_NoDefaultFixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 110, - Name: "testdata.no_default_fixed64", - Tag: "fixed64,110,opt,name=no_default_fixed64", -} - -var E_NoDefaultSfixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 111, - Name: "testdata.no_default_sfixed32", - Tag: "fixed32,111,opt,name=no_default_sfixed32", -} - -var E_NoDefaultSfixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 112, - Name: "testdata.no_default_sfixed64", - Tag: "fixed64,112,opt,name=no_default_sfixed64", -} - -var E_NoDefaultBool = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 113, - Name: "testdata.no_default_bool", - Tag: "varint,113,opt,name=no_default_bool", -} - -var E_NoDefaultString = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*string)(nil), - Field: 114, - Name: "testdata.no_default_string", - Tag: "bytes,114,opt,name=no_default_string", -} - -var E_NoDefaultBytes = &proto.ExtensionDesc{ - 
ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: ([]byte)(nil), - Field: 115, - Name: "testdata.no_default_bytes", - Tag: "bytes,115,opt,name=no_default_bytes", -} - -var E_NoDefaultEnum = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), - Field: 116, - Name: "testdata.no_default_enum", - Tag: "varint,116,opt,name=no_default_enum,enum=testdata.DefaultsMessage_DefaultsEnum", -} - -var E_DefaultDouble = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float64)(nil), - Field: 201, - Name: "testdata.default_double", - Tag: "fixed64,201,opt,name=default_double,def=3.1415", -} - -var E_DefaultFloat = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float32)(nil), - Field: 202, - Name: "testdata.default_float", - Tag: "fixed32,202,opt,name=default_float,def=3.14", -} - -var E_DefaultInt32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 203, - Name: "testdata.default_int32", - Tag: "varint,203,opt,name=default_int32,def=42", -} - -var E_DefaultInt64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 204, - Name: "testdata.default_int64", - Tag: "varint,204,opt,name=default_int64,def=43", -} - -var E_DefaultUint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 205, - Name: "testdata.default_uint32", - Tag: "varint,205,opt,name=default_uint32,def=44", -} - -var E_DefaultUint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 206, - Name: "testdata.default_uint64", - Tag: "varint,206,opt,name=default_uint64,def=45", -} - -var E_DefaultSint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 207, - Name: "testdata.default_sint32", - Tag: "zigzag32,207,opt,name=default_sint32,def=46", -} - -var E_DefaultSint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 208, - Name: "testdata.default_sint64", - Tag: "zigzag64,208,opt,name=default_sint64,def=47", -} - -var E_DefaultFixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 209, - Name: "testdata.default_fixed32", - Tag: "fixed32,209,opt,name=default_fixed32,def=48", -} - -var E_DefaultFixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 210, - Name: "testdata.default_fixed64", - Tag: "fixed64,210,opt,name=default_fixed64,def=49", -} - -var E_DefaultSfixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 211, - Name: "testdata.default_sfixed32", - Tag: "fixed32,211,opt,name=default_sfixed32,def=50", -} - -var E_DefaultSfixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 212, - Name: "testdata.default_sfixed64", - Tag: "fixed64,212,opt,name=default_sfixed64,def=51", -} - -var E_DefaultBool = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 213, - Name: "testdata.default_bool", - Tag: "varint,213,opt,name=default_bool,def=1", -} - -var E_DefaultString = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*string)(nil), - Field: 214, - Name: 
"testdata.default_string", - Tag: "bytes,214,opt,name=default_string,def=Hello, string", -} - -var E_DefaultBytes = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: ([]byte)(nil), - Field: 215, - Name: "testdata.default_bytes", - Tag: "bytes,215,opt,name=default_bytes,def=Hello, bytes", -} - -var E_DefaultEnum = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), - Field: 216, - Name: "testdata.default_enum", - Tag: "varint,216,opt,name=default_enum,enum=testdata.DefaultsMessage_DefaultsEnum,def=1", -} - -var E_X201 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 201, - Name: "testdata.x201", - Tag: "bytes,201,opt,name=x201", -} - -var E_X202 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 202, - Name: "testdata.x202", - Tag: "bytes,202,opt,name=x202", -} - -var E_X203 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 203, - Name: "testdata.x203", - Tag: "bytes,203,opt,name=x203", -} - -var E_X204 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 204, - Name: "testdata.x204", - Tag: "bytes,204,opt,name=x204", -} - -var E_X205 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 205, - Name: "testdata.x205", - Tag: "bytes,205,opt,name=x205", -} - -var E_X206 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 206, - Name: "testdata.x206", - Tag: "bytes,206,opt,name=x206", -} - -var E_X207 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 207, - Name: "testdata.x207", - Tag: "bytes,207,opt,name=x207", -} - -var E_X208 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 208, - Name: "testdata.x208", - Tag: "bytes,208,opt,name=x208", -} - -var E_X209 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 209, - Name: "testdata.x209", - Tag: "bytes,209,opt,name=x209", -} - -var E_X210 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 210, - Name: "testdata.x210", - Tag: "bytes,210,opt,name=x210", -} - -var E_X211 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 211, - Name: "testdata.x211", - Tag: "bytes,211,opt,name=x211", -} - -var E_X212 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 212, - Name: "testdata.x212", - Tag: "bytes,212,opt,name=x212", -} - -var E_X213 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 213, - Name: "testdata.x213", - Tag: "bytes,213,opt,name=x213", -} - -var E_X214 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 214, - Name: "testdata.x214", - Tag: "bytes,214,opt,name=x214", -} - -var E_X215 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 215, - Name: "testdata.x215", - Tag: "bytes,215,opt,name=x215", -} - -var E_X216 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 216, - Name: "testdata.x216", - Tag: "bytes,216,opt,name=x216", -} - 
-var E_X217 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 217, - Name: "testdata.x217", - Tag: "bytes,217,opt,name=x217", -} - -var E_X218 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 218, - Name: "testdata.x218", - Tag: "bytes,218,opt,name=x218", -} - -var E_X219 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 219, - Name: "testdata.x219", - Tag: "bytes,219,opt,name=x219", -} - -var E_X220 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 220, - Name: "testdata.x220", - Tag: "bytes,220,opt,name=x220", -} - -var E_X221 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 221, - Name: "testdata.x221", - Tag: "bytes,221,opt,name=x221", -} - -var E_X222 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 222, - Name: "testdata.x222", - Tag: "bytes,222,opt,name=x222", -} - -var E_X223 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 223, - Name: "testdata.x223", - Tag: "bytes,223,opt,name=x223", -} - -var E_X224 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 224, - Name: "testdata.x224", - Tag: "bytes,224,opt,name=x224", -} - -var E_X225 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 225, - Name: "testdata.x225", - Tag: "bytes,225,opt,name=x225", -} - -var E_X226 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 226, - Name: "testdata.x226", - Tag: "bytes,226,opt,name=x226", -} - -var E_X227 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 227, - Name: "testdata.x227", - Tag: "bytes,227,opt,name=x227", -} - -var E_X228 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 228, - Name: "testdata.x228", - Tag: "bytes,228,opt,name=x228", -} - -var E_X229 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 229, - Name: "testdata.x229", - Tag: "bytes,229,opt,name=x229", -} - -var E_X230 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 230, - Name: "testdata.x230", - Tag: "bytes,230,opt,name=x230", -} - -var E_X231 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 231, - Name: "testdata.x231", - Tag: "bytes,231,opt,name=x231", -} - -var E_X232 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 232, - Name: "testdata.x232", - Tag: "bytes,232,opt,name=x232", -} - -var E_X233 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 233, - Name: "testdata.x233", - Tag: "bytes,233,opt,name=x233", -} - -var E_X234 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 234, - Name: "testdata.x234", - Tag: "bytes,234,opt,name=x234", -} - -var E_X235 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 235, - Name: "testdata.x235", - Tag: "bytes,235,opt,name=x235", -} - -var E_X236 = &proto.ExtensionDesc{ - ExtendedType: 
(*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 236, - Name: "testdata.x236", - Tag: "bytes,236,opt,name=x236", -} - -var E_X237 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 237, - Name: "testdata.x237", - Tag: "bytes,237,opt,name=x237", -} - -var E_X238 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 238, - Name: "testdata.x238", - Tag: "bytes,238,opt,name=x238", -} - -var E_X239 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 239, - Name: "testdata.x239", - Tag: "bytes,239,opt,name=x239", -} - -var E_X240 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 240, - Name: "testdata.x240", - Tag: "bytes,240,opt,name=x240", -} - -var E_X241 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 241, - Name: "testdata.x241", - Tag: "bytes,241,opt,name=x241", -} - -var E_X242 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 242, - Name: "testdata.x242", - Tag: "bytes,242,opt,name=x242", -} - -var E_X243 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 243, - Name: "testdata.x243", - Tag: "bytes,243,opt,name=x243", -} - -var E_X244 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 244, - Name: "testdata.x244", - Tag: "bytes,244,opt,name=x244", -} - -var E_X245 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 245, - Name: "testdata.x245", - Tag: "bytes,245,opt,name=x245", -} - -var E_X246 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 246, - Name: "testdata.x246", - Tag: "bytes,246,opt,name=x246", -} - -var E_X247 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 247, - Name: "testdata.x247", - Tag: "bytes,247,opt,name=x247", -} - -var E_X248 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 248, - Name: "testdata.x248", - Tag: "bytes,248,opt,name=x248", -} - -var E_X249 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 249, - Name: "testdata.x249", - Tag: "bytes,249,opt,name=x249", -} - -var E_X250 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 250, - Name: "testdata.x250", - Tag: "bytes,250,opt,name=x250", -} - -func init() { - proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value) - proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) - proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) - proto.RegisterEnum("testdata.DefaultsMessage_DefaultsEnum", DefaultsMessage_DefaultsEnum_name, DefaultsMessage_DefaultsEnum_value) - proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value) - proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value) - proto.RegisterExtension(E_Ext_More) - proto.RegisterExtension(E_Ext_Text) - proto.RegisterExtension(E_Ext_Number) - proto.RegisterExtension(E_Greeting) - proto.RegisterExtension(E_NoDefaultDouble) - proto.RegisterExtension(E_NoDefaultFloat) - 
proto.RegisterExtension(E_NoDefaultInt32) - proto.RegisterExtension(E_NoDefaultInt64) - proto.RegisterExtension(E_NoDefaultUint32) - proto.RegisterExtension(E_NoDefaultUint64) - proto.RegisterExtension(E_NoDefaultSint32) - proto.RegisterExtension(E_NoDefaultSint64) - proto.RegisterExtension(E_NoDefaultFixed32) - proto.RegisterExtension(E_NoDefaultFixed64) - proto.RegisterExtension(E_NoDefaultSfixed32) - proto.RegisterExtension(E_NoDefaultSfixed64) - proto.RegisterExtension(E_NoDefaultBool) - proto.RegisterExtension(E_NoDefaultString) - proto.RegisterExtension(E_NoDefaultBytes) - proto.RegisterExtension(E_NoDefaultEnum) - proto.RegisterExtension(E_DefaultDouble) - proto.RegisterExtension(E_DefaultFloat) - proto.RegisterExtension(E_DefaultInt32) - proto.RegisterExtension(E_DefaultInt64) - proto.RegisterExtension(E_DefaultUint32) - proto.RegisterExtension(E_DefaultUint64) - proto.RegisterExtension(E_DefaultSint32) - proto.RegisterExtension(E_DefaultSint64) - proto.RegisterExtension(E_DefaultFixed32) - proto.RegisterExtension(E_DefaultFixed64) - proto.RegisterExtension(E_DefaultSfixed32) - proto.RegisterExtension(E_DefaultSfixed64) - proto.RegisterExtension(E_DefaultBool) - proto.RegisterExtension(E_DefaultString) - proto.RegisterExtension(E_DefaultBytes) - proto.RegisterExtension(E_DefaultEnum) - proto.RegisterExtension(E_X201) - proto.RegisterExtension(E_X202) - proto.RegisterExtension(E_X203) - proto.RegisterExtension(E_X204) - proto.RegisterExtension(E_X205) - proto.RegisterExtension(E_X206) - proto.RegisterExtension(E_X207) - proto.RegisterExtension(E_X208) - proto.RegisterExtension(E_X209) - proto.RegisterExtension(E_X210) - proto.RegisterExtension(E_X211) - proto.RegisterExtension(E_X212) - proto.RegisterExtension(E_X213) - proto.RegisterExtension(E_X214) - proto.RegisterExtension(E_X215) - proto.RegisterExtension(E_X216) - proto.RegisterExtension(E_X217) - proto.RegisterExtension(E_X218) - proto.RegisterExtension(E_X219) - proto.RegisterExtension(E_X220) - proto.RegisterExtension(E_X221) - proto.RegisterExtension(E_X222) - proto.RegisterExtension(E_X223) - proto.RegisterExtension(E_X224) - proto.RegisterExtension(E_X225) - proto.RegisterExtension(E_X226) - proto.RegisterExtension(E_X227) - proto.RegisterExtension(E_X228) - proto.RegisterExtension(E_X229) - proto.RegisterExtension(E_X230) - proto.RegisterExtension(E_X231) - proto.RegisterExtension(E_X232) - proto.RegisterExtension(E_X233) - proto.RegisterExtension(E_X234) - proto.RegisterExtension(E_X235) - proto.RegisterExtension(E_X236) - proto.RegisterExtension(E_X237) - proto.RegisterExtension(E_X238) - proto.RegisterExtension(E_X239) - proto.RegisterExtension(E_X240) - proto.RegisterExtension(E_X241) - proto.RegisterExtension(E_X242) - proto.RegisterExtension(E_X243) - proto.RegisterExtension(E_X244) - proto.RegisterExtension(E_X245) - proto.RegisterExtension(E_X246) - proto.RegisterExtension(E_X247) - proto.RegisterExtension(E_X248) - proto.RegisterExtension(E_X249) - proto.RegisterExtension(E_X250) -}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto
deleted file mode 100644
index 440dba38d..000000000
--- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto
+++ /dev/null
@@ -1,480 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// A feature-rich test file for the protocol compiler and libraries. - -syntax = "proto2"; - -package testdata; - -enum FOO { FOO1 = 1; }; - -message GoEnum { - required FOO foo = 1; -} - -message GoTestField { - required string Label = 1; - required string Type = 2; -} - -message GoTest { - // An enum, for completeness. - enum KIND { - VOID = 0; - - // Basic types - BOOL = 1; - BYTES = 2; - FINGERPRINT = 3; - FLOAT = 4; - INT = 5; - STRING = 6; - TIME = 7; - - // Groupings - TUPLE = 8; - ARRAY = 9; - MAP = 10; - - // Table types - TABLE = 11; - - // Functions - FUNCTION = 12; // last tag - }; - - // Some typical parameters - required KIND Kind = 1; - optional string Table = 2; - optional int32 Param = 3; - - // Required, repeated and optional foreign fields. 
- required GoTestField RequiredField = 4; - repeated GoTestField RepeatedField = 5; - optional GoTestField OptionalField = 6; - - // Required fields of all basic types - required bool F_Bool_required = 10; - required int32 F_Int32_required = 11; - required int64 F_Int64_required = 12; - required fixed32 F_Fixed32_required = 13; - required fixed64 F_Fixed64_required = 14; - required uint32 F_Uint32_required = 15; - required uint64 F_Uint64_required = 16; - required float F_Float_required = 17; - required double F_Double_required = 18; - required string F_String_required = 19; - required bytes F_Bytes_required = 101; - required sint32 F_Sint32_required = 102; - required sint64 F_Sint64_required = 103; - - // Repeated fields of all basic types - repeated bool F_Bool_repeated = 20; - repeated int32 F_Int32_repeated = 21; - repeated int64 F_Int64_repeated = 22; - repeated fixed32 F_Fixed32_repeated = 23; - repeated fixed64 F_Fixed64_repeated = 24; - repeated uint32 F_Uint32_repeated = 25; - repeated uint64 F_Uint64_repeated = 26; - repeated float F_Float_repeated = 27; - repeated double F_Double_repeated = 28; - repeated string F_String_repeated = 29; - repeated bytes F_Bytes_repeated = 201; - repeated sint32 F_Sint32_repeated = 202; - repeated sint64 F_Sint64_repeated = 203; - - // Optional fields of all basic types - optional bool F_Bool_optional = 30; - optional int32 F_Int32_optional = 31; - optional int64 F_Int64_optional = 32; - optional fixed32 F_Fixed32_optional = 33; - optional fixed64 F_Fixed64_optional = 34; - optional uint32 F_Uint32_optional = 35; - optional uint64 F_Uint64_optional = 36; - optional float F_Float_optional = 37; - optional double F_Double_optional = 38; - optional string F_String_optional = 39; - optional bytes F_Bytes_optional = 301; - optional sint32 F_Sint32_optional = 302; - optional sint64 F_Sint64_optional = 303; - - // Default-valued fields of all basic types - optional bool F_Bool_defaulted = 40 [default=true]; - optional int32 F_Int32_defaulted = 41 [default=32]; - optional int64 F_Int64_defaulted = 42 [default=64]; - optional fixed32 F_Fixed32_defaulted = 43 [default=320]; - optional fixed64 F_Fixed64_defaulted = 44 [default=640]; - optional uint32 F_Uint32_defaulted = 45 [default=3200]; - optional uint64 F_Uint64_defaulted = 46 [default=6400]; - optional float F_Float_defaulted = 47 [default=314159.]; - optional double F_Double_defaulted = 48 [default=271828.]; - optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"]; - optional bytes F_Bytes_defaulted = 401 [default="Bignose"]; - optional sint32 F_Sint32_defaulted = 402 [default = -32]; - optional sint64 F_Sint64_defaulted = 403 [default = -64]; - - // Packed repeated fields (no string or bytes). - repeated bool F_Bool_repeated_packed = 50 [packed=true]; - repeated int32 F_Int32_repeated_packed = 51 [packed=true]; - repeated int64 F_Int64_repeated_packed = 52 [packed=true]; - repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true]; - repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true]; - repeated uint32 F_Uint32_repeated_packed = 55 [packed=true]; - repeated uint64 F_Uint64_repeated_packed = 56 [packed=true]; - repeated float F_Float_repeated_packed = 57 [packed=true]; - repeated double F_Double_repeated_packed = 58 [packed=true]; - repeated sint32 F_Sint32_repeated_packed = 502 [packed=true]; - repeated sint64 F_Sint64_repeated_packed = 503 [packed=true]; - - // Required, repeated, and optional groups. 
- required group RequiredGroup = 70 { - required string RequiredField = 71; - }; - - repeated group RepeatedGroup = 80 { - required string RequiredField = 81; - }; - - optional group OptionalGroup = 90 { - required string RequiredField = 91; - }; -} - -// For testing skipping of unrecognized fields. -// Numbers are all big, larger than tag numbers in GoTestField, -// the message used in the corresponding test. -message GoSkipTest { - required int32 skip_int32 = 11; - required fixed32 skip_fixed32 = 12; - required fixed64 skip_fixed64 = 13; - required string skip_string = 14; - required group SkipGroup = 15 { - required int32 group_int32 = 16; - required string group_string = 17; - } -} - -// For testing packed/non-packed decoder switching. -// A serialized instance of one should be deserializable as the other. -message NonPackedTest { - repeated int32 a = 1; -} - -message PackedTest { - repeated int32 b = 1 [packed=true]; -} - -message MaxTag { - // Maximum possible tag number. - optional string last_field = 536870911; -} - -message OldMessage { - message Nested { - optional string name = 1; - } - optional Nested nested = 1; - - optional int32 num = 2; -} - -// NewMessage is wire compatible with OldMessage; -// imagine it as a future version. -message NewMessage { - message Nested { - optional string name = 1; - optional string food_group = 2; - } - optional Nested nested = 1; - - // This is an int32 in OldMessage. - optional int64 num = 2; -} - -// Smaller tests for ASCII formatting. - -message InnerMessage { - required string host = 1; - optional int32 port = 2 [default=4000]; - optional bool connected = 3; -} - -message OtherMessage { - optional int64 key = 1; - optional bytes value = 2; - optional float weight = 3; - optional InnerMessage inner = 4; -} - -message MyMessage { - required int32 count = 1; - optional string name = 2; - optional string quote = 3; - repeated string pet = 4; - optional InnerMessage inner = 5; - repeated OtherMessage others = 6; - repeated InnerMessage rep_inner = 12; - - enum Color { - RED = 0; - GREEN = 1; - BLUE = 2; - }; - optional Color bikeshed = 7; - - optional group SomeGroup = 8 { - optional int32 group_field = 9; - } - - // This field becomes [][]byte in the generated code. 
- repeated bytes rep_bytes = 10; - - optional double bigfloat = 11; - - extensions 100 to max; -} - -message Ext { - extend MyMessage { - optional Ext more = 103; - optional string text = 104; - optional int32 number = 105; - } - - optional string data = 1; -} - -extend MyMessage { - repeated string greeting = 106; -} - -message DefaultsMessage { - enum DefaultsEnum { - ZERO = 0; - ONE = 1; - TWO = 2; - }; - extensions 100 to max; -} - -extend DefaultsMessage { - optional double no_default_double = 101; - optional float no_default_float = 102; - optional int32 no_default_int32 = 103; - optional int64 no_default_int64 = 104; - optional uint32 no_default_uint32 = 105; - optional uint64 no_default_uint64 = 106; - optional sint32 no_default_sint32 = 107; - optional sint64 no_default_sint64 = 108; - optional fixed32 no_default_fixed32 = 109; - optional fixed64 no_default_fixed64 = 110; - optional sfixed32 no_default_sfixed32 = 111; - optional sfixed64 no_default_sfixed64 = 112; - optional bool no_default_bool = 113; - optional string no_default_string = 114; - optional bytes no_default_bytes = 115; - optional DefaultsMessage.DefaultsEnum no_default_enum = 116; - - optional double default_double = 201 [default = 3.1415]; - optional float default_float = 202 [default = 3.14]; - optional int32 default_int32 = 203 [default = 42]; - optional int64 default_int64 = 204 [default = 43]; - optional uint32 default_uint32 = 205 [default = 44]; - optional uint64 default_uint64 = 206 [default = 45]; - optional sint32 default_sint32 = 207 [default = 46]; - optional sint64 default_sint64 = 208 [default = 47]; - optional fixed32 default_fixed32 = 209 [default = 48]; - optional fixed64 default_fixed64 = 210 [default = 49]; - optional sfixed32 default_sfixed32 = 211 [default = 50]; - optional sfixed64 default_sfixed64 = 212 [default = 51]; - optional bool default_bool = 213 [default = true]; - optional string default_string = 214 [default = "Hello, string"]; - optional bytes default_bytes = 215 [default = "Hello, bytes"]; - optional DefaultsMessage.DefaultsEnum default_enum = 216 [default = ONE]; -} - -message MyMessageSet { - option message_set_wire_format = true; - extensions 100 to max; -} - -message Empty { -} - -extend MyMessageSet { - optional Empty x201 = 201; - optional Empty x202 = 202; - optional Empty x203 = 203; - optional Empty x204 = 204; - optional Empty x205 = 205; - optional Empty x206 = 206; - optional Empty x207 = 207; - optional Empty x208 = 208; - optional Empty x209 = 209; - optional Empty x210 = 210; - optional Empty x211 = 211; - optional Empty x212 = 212; - optional Empty x213 = 213; - optional Empty x214 = 214; - optional Empty x215 = 215; - optional Empty x216 = 216; - optional Empty x217 = 217; - optional Empty x218 = 218; - optional Empty x219 = 219; - optional Empty x220 = 220; - optional Empty x221 = 221; - optional Empty x222 = 222; - optional Empty x223 = 223; - optional Empty x224 = 224; - optional Empty x225 = 225; - optional Empty x226 = 226; - optional Empty x227 = 227; - optional Empty x228 = 228; - optional Empty x229 = 229; - optional Empty x230 = 230; - optional Empty x231 = 231; - optional Empty x232 = 232; - optional Empty x233 = 233; - optional Empty x234 = 234; - optional Empty x235 = 235; - optional Empty x236 = 236; - optional Empty x237 = 237; - optional Empty x238 = 238; - optional Empty x239 = 239; - optional Empty x240 = 240; - optional Empty x241 = 241; - optional Empty x242 = 242; - optional Empty x243 = 243; - optional Empty x244 = 244; - optional Empty x245 = 
245; - optional Empty x246 = 246; - optional Empty x247 = 247; - optional Empty x248 = 248; - optional Empty x249 = 249; - optional Empty x250 = 250; -} - -message MessageList { - repeated group Message = 1 { - required string name = 2; - required int32 count = 3; - } -} - -message Strings { - optional string string_field = 1; - optional bytes bytes_field = 2; -} - -message Defaults { - enum Color { - RED = 0; - GREEN = 1; - BLUE = 2; - } - - // Default-valued fields of all basic types. - // Same as GoTest, but copied here to make testing easier. - optional bool F_Bool = 1 [default=true]; - optional int32 F_Int32 = 2 [default=32]; - optional int64 F_Int64 = 3 [default=64]; - optional fixed32 F_Fixed32 = 4 [default=320]; - optional fixed64 F_Fixed64 = 5 [default=640]; - optional uint32 F_Uint32 = 6 [default=3200]; - optional uint64 F_Uint64 = 7 [default=6400]; - optional float F_Float = 8 [default=314159.]; - optional double F_Double = 9 [default=271828.]; - optional string F_String = 10 [default="hello, \"world!\"\n"]; - optional bytes F_Bytes = 11 [default="Bignose"]; - optional sint32 F_Sint32 = 12 [default=-32]; - optional sint64 F_Sint64 = 13 [default=-64]; - optional Color F_Enum = 14 [default=GREEN]; - - // More fields with crazy defaults. - optional float F_Pinf = 15 [default=inf]; - optional float F_Ninf = 16 [default=-inf]; - optional float F_Nan = 17 [default=nan]; - - // Sub-message. - optional SubDefaults sub = 18; - - // Redundant but explicit defaults. - optional string str_zero = 19 [default=""]; -} - -message SubDefaults { - optional int64 n = 1 [default=7]; -} - -message RepeatedEnum { - enum Color { - RED = 1; - } - repeated Color color = 1; -} - -message MoreRepeated { - repeated bool bools = 1; - repeated bool bools_packed = 2 [packed=true]; - repeated int32 ints = 3; - repeated int32 ints_packed = 4 [packed=true]; - repeated int64 int64s_packed = 7 [packed=true]; - repeated string strings = 5; - repeated fixed32 fixeds = 6; -} - -// GroupOld and GroupNew have the same wire format. -// GroupNew has a new field inside a group. - -message GroupOld { - optional group G = 101 { - optional int32 x = 2; - } -} - -message GroupNew { - optional group G = 101 { - optional int32 x = 2; - optional int32 y = 3; - } -} - -message FloatingPoint { - required double f = 1; -} - -message MessageWithMap { - map<int32, string> name_mapping = 1; - map<int64, FloatingPoint> msg_mapping = 2; - map<bool, bytes> byte_mapping = 3; - map<string, string> str_to_str = 4; -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index f3db2cf5e..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,769 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. - -import ( - "bufio" - "bytes" - "encoding" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - gtNewline = []byte(">\n") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. 
- return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Printf("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -var ( - messageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem() -) - -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - -func writeStruct(w *textWriter, sv reflect.Value) error { - if sv.Type() == messageSetType { - return writeMessageSet(w, sv.Addr().Interface().(*MessageSet)) - } - - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := writeAny(w, key, props.mkeyprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := writeAny(w, val, props.mvalprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } - - // Enums have a String method, so writeAny will work fine. - if err := writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if pv.Type().Implements(extendableProtoType) { - if err := writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeRaw writes an uninterpreted raw message. -func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - -// writeAny writes an arbitrary field. -func writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. 
- if err := writeString(w, string(v.Interface().([]byte))); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if tm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := tm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else if err := writeStruct(w, v); err != nil { - return err - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. - switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeMessageSet(w *textWriter, ms *MessageSet) error { - for _, item := range ms.Item { - id := *item.TypeId - if msd, ok := messageSetMap[id]; ok { - // Known message set type. - if _, err := fmt.Fprintf(w, "[%s]: <\n", msd.name); err != nil { - return err - } - w.indent() - - pb := reflect.New(msd.t.Elem()) - if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil { - if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil { - return err - } - } else { - if err := writeStruct(w, pb.Elem()); err != nil { - return err - } - } - } else { - // Unknown type. 
- if _, err := fmt.Fprintf(w, "[%d]: <\n", id); err != nil { - return err - } - w.indent() - if err := writeUnknownStruct(w, item.Message); err != nil { - return err - } - } - w.unindent() - if _, err := w.Write(gtNewline); err != nil { - return err - } - } - return nil -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep := pv.Interface().(extendableProto) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m := ep.ExtensionMap() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
- if !desc.repeated() { - if err := writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -func marshalText(w io.Writer, pb Message, compact bool) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: compact, - } - - if tm, ok := pb.(encoding.TextMarshaler); ok { - text, err := tm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { - return marshalText(w, pb, false) -} - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { - var buf bytes.Buffer - marshalText(&buf, pb, false) - return buf.String() -} - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { - var buf bytes.Buffer - marshalText(&buf, pb, true) - return buf.String() -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go deleted file mode 100644 index 7d0c75719..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go +++ /dev/null @@ -1,772 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. - -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', 
',': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - base := 8 - ss := s[:2] - s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) - if err != nil { - return "", "", err - } - return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b - } - s = s[n:] - return string(bs), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. 
-func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && p.cur.value[0] == '"' { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. - cat := p.cur - for { - p.skipWhitespace() - if p.done || p.s[0] != '"' { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(st reflect.Type, name string) (int, *Properties, bool) { - sprops := GetProperties(st) - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - reqCount := GetProperties(st).reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]". - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). 
- tok = p.next() - if tok.err != nil { - return tok.err - } - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. - for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == tok.value { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", tok.value) - } - // Check the extension terminator. - tok = p.next() - if tok.err != nil { - return tok.err - } - if tok.value != "]" { - return p.errorf("unrecognized extension terminator %q", tok.value) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(extendableProto) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - } else { - // This is a normal, non-extension field. - name := tok.value - fi, props, ok := structFieldByName(st, name) - if !ok { - return p.errorf("unknown field name %q in %v", name, st) - } - - dst := sv.Field(fi) - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // Technically the "key" and "value" could come in any order, - // but in practice they won't. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - if err := p.consumeToken("key"); err != nil { - return err - } - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.mkeyprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - if err := p.consumeToken("value"); err != nil { - return err - } - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.mvalprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - if err := p.consumeToken(terminator); err != nil { - return err - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, st.Field(fi).Type); err != nil { - return err - } - - // Parse into the field. 
- fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } else if props.Required { - reqCount-- - } - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. -func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. May already exist. - flen := fv.Len() - if flen == fv.Cap() { - nav := reflect.MakeSlice(at, flen, 2*flen+1) - reflect.Copy(nav, fv) - fv.Set(nav) - } - fv.SetLen(flen + 1) - - // Read one. - p.back() - return p.readAny(fv.Index(flen), props) - case reflect.Bool: - // Either "true", "false", 1 or 0. - switch tok.value { - case "true", "1": - fv.SetBool(true) - return nil - case "false", "0": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. 
- return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. -func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err - } - pb.Reset() - v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go deleted file mode 100644 index 0754b2626..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go +++ /dev/null @@ -1,511 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "math" - "reflect" - "testing" - - . "github.com/golang/protobuf/proto" - proto3pb "github.com/golang/protobuf/proto/proto3_proto" - . 
"github.com/golang/protobuf/proto/testdata" -) - -type UnmarshalTextTest struct { - in string - err string // if "", no error expected - out *MyMessage -} - -func buildExtStructTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - SetExtension(msg, E_Ext_More, &Ext{ - Data: String("Hello, world!"), - }) - return UnmarshalTextTest{in: text, out: msg} -} - -func buildExtDataTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - SetExtension(msg, E_Ext_Text, String("Hello, world!")) - SetExtension(msg, E_Ext_Number, Int32(1729)) - return UnmarshalTextTest{in: text, out: msg} -} - -func buildExtRepStringTest(text string) UnmarshalTextTest { - msg := &MyMessage{ - Count: Int32(42), - } - if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil { - panic(err) - } - return UnmarshalTextTest{in: text, out: msg} -} - -var unMarshalTextTests = []UnmarshalTextTest{ - // Basic - { - in: " count:42\n name:\"Dave\" ", - out: &MyMessage{ - Count: Int32(42), - Name: String("Dave"), - }, - }, - - // Empty quoted string - { - in: `count:42 name:""`, - out: &MyMessage{ - Count: Int32(42), - Name: String(""), - }, - }, - - // Quoted string concatenation - { - in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`, - out: &MyMessage{ - Count: Int32(42), - Name: String("My name is elsewhere"), - }, - }, - - // Quoted string with escaped apostrophe - { - in: `count:42 name: "HOLIDAY - New Year\'s Day"`, - out: &MyMessage{ - Count: Int32(42), - Name: String("HOLIDAY - New Year's Day"), - }, - }, - - // Quoted string with single quote - { - in: `count:42 name: 'Roger "The Ramster" Ramjet'`, - out: &MyMessage{ - Count: Int32(42), - Name: String(`Roger "The Ramster" Ramjet`), - }, - }, - - // Quoted string with all the accepted special characters from the C++ test - { - in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"", - out: &MyMessage{ - Count: Int32(42), - Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"), - }, - }, - - // Quoted string with quoted backslash - { - in: `count:42 name: "\\'xyz"`, - out: &MyMessage{ - Count: Int32(42), - Name: String(`\'xyz`), - }, - }, - - // Quoted string with UTF-8 bytes. 
- { - in: "count:42 name: '\303\277\302\201\xAB'", - out: &MyMessage{ - Count: Int32(42), - Name: String("\303\277\302\201\xAB"), - }, - }, - - // Bad quoted string - { - in: `inner: < host: "\0" >` + "\n", - err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`, - }, - - // Number too large for int64 - { - in: "count: 1 others { key: 123456789012345678901 }", - err: "line 1.23: invalid int64: 123456789012345678901", - }, - - // Number too large for int32 - { - in: "count: 1234567890123", - err: "line 1.7: invalid int32: 1234567890123", - }, - - // Number in hexadecimal - { - in: "count: 0x2beef", - out: &MyMessage{ - Count: Int32(0x2beef), - }, - }, - - // Number in octal - { - in: "count: 024601", - out: &MyMessage{ - Count: Int32(024601), - }, - }, - - // Floating point number with "f" suffix - { - in: "count: 4 others:< weight: 17.0f >", - out: &MyMessage{ - Count: Int32(4), - Others: []*OtherMessage{ - { - Weight: Float32(17), - }, - }, - }, - }, - - // Floating point positive infinity - { - in: "count: 4 bigfloat: inf", - out: &MyMessage{ - Count: Int32(4), - Bigfloat: Float64(math.Inf(1)), - }, - }, - - // Floating point negative infinity - { - in: "count: 4 bigfloat: -inf", - out: &MyMessage{ - Count: Int32(4), - Bigfloat: Float64(math.Inf(-1)), - }, - }, - - // Number too large for float32 - { - in: "others:< weight: 12345678901234567890123456789012345678901234567890 >", - err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890", - }, - - // Number posing as a quoted string - { - in: `inner: < host: 12 >` + "\n", - err: `line 1.15: invalid string: 12`, - }, - - // Quoted string posing as int32 - { - in: `count: "12"`, - err: `line 1.7: invalid int32: "12"`, - }, - - // Quoted string posing a float32 - { - in: `others:< weight: "17.4" >`, - err: `line 1.17: invalid float32: "17.4"`, - }, - - // Enum - { - in: `count:42 bikeshed: BLUE`, - out: &MyMessage{ - Count: Int32(42), - Bikeshed: MyMessage_BLUE.Enum(), - }, - }, - - // Repeated field - { - in: `count:42 pet: "horsey" pet:"bunny"`, - out: &MyMessage{ - Count: Int32(42), - Pet: []string{"horsey", "bunny"}, - }, - }, - - // Repeated message with/without colon and <>/{} - { - in: `count:42 others:{} others{} others:<> others:{}`, - out: &MyMessage{ - Count: Int32(42), - Others: []*OtherMessage{ - {}, - {}, - {}, - {}, - }, - }, - }, - - // Missing colon for inner message - { - in: `count:42 inner < host: "cauchy.syd" >`, - out: &MyMessage{ - Count: Int32(42), - Inner: &InnerMessage{ - Host: String("cauchy.syd"), - }, - }, - }, - - // Missing colon for string field - { - in: `name "Dave"`, - err: `line 1.5: expected ':', found "\"Dave\""`, - }, - - // Missing colon for int32 field - { - in: `count 42`, - err: `line 1.6: expected ':', found "42"`, - }, - - // Missing required field - { - in: `name: "Pawel"`, - err: `proto: required field "testdata.MyMessage.count" not set`, - out: &MyMessage{ - Name: String("Pawel"), - }, - }, - - // Repeated non-repeated field - { - in: `name: "Rob" name: "Russ"`, - err: `line 1.12: non-repeated field "name" was repeated`, - }, - - // Group - { - in: `count: 17 SomeGroup { group_field: 12 }`, - out: &MyMessage{ - Count: Int32(17), - Somegroup: &MyMessage_SomeGroup{ - GroupField: Int32(12), - }, - }, - }, - - // Semicolon between fields - { - in: `count:3;name:"Calvin"`, - out: &MyMessage{ - Count: Int32(3), - Name: String("Calvin"), - }, - }, - // Comma between fields - { - in: `count:4,name:"Ezekiel"`, - out: &MyMessage{ - Count: Int32(4), - 
Name: String("Ezekiel"), - }, - }, - - // Extension - buildExtStructTest(`count: 42 [testdata.Ext.more]:`), - buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`), - buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`), - buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`), - - // Big all-in-one - { - in: "count:42 # Meaning\n" + - `name:"Dave" ` + - `quote:"\"I didn't want to go.\"" ` + - `pet:"bunny" ` + - `pet:"kitty" ` + - `pet:"horsey" ` + - `inner:<` + - ` host:"footrest.syd" ` + - ` port:7001 ` + - ` connected:true ` + - `> ` + - `others:<` + - ` key:3735928559 ` + - ` value:"\x01A\a\f" ` + - `> ` + - `others:<` + - " weight:58.9 # Atomic weight of Co\n" + - ` inner:<` + - ` host:"lesha.mtv" ` + - ` port:8002 ` + - ` >` + - `>`, - out: &MyMessage{ - Count: Int32(42), - Name: String("Dave"), - Quote: String(`"I didn't want to go."`), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &InnerMessage{ - Host: String("footrest.syd"), - Port: Int32(7001), - Connected: Bool(true), - }, - Others: []*OtherMessage{ - { - Key: Int64(3735928559), - Value: []byte{0x1, 'A', '\a', '\f'}, - }, - { - Weight: Float32(58.9), - Inner: &InnerMessage{ - Host: String("lesha.mtv"), - Port: Int32(8002), - }, - }, - }, - }, - }, -} - -func TestUnmarshalText(t *testing.T) { - for i, test := range unMarshalTextTests { - pb := new(MyMessage) - err := UnmarshalText(test.in, pb) - if test.err == "" { - // We don't expect failure. - if err != nil { - t.Errorf("Test %d: Unexpected error: %v", i, err) - } else if !reflect.DeepEqual(pb, test.out) { - t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", - i, pb, test.out) - } - } else { - // We do expect failure. - if err == nil { - t.Errorf("Test %d: Didn't get expected error: %v", i, test.err) - } else if err.Error() != test.err { - t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v", - i, err.Error(), test.err) - } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) { - t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", - i, pb, test.out) - } - } - } -} - -func TestUnmarshalTextCustomMessage(t *testing.T) { - msg := &textMessage{} - if err := UnmarshalText("custom", msg); err != nil { - t.Errorf("Unexpected error from custom unmarshal: %v", err) - } - if UnmarshalText("not custom", msg) == nil { - t.Errorf("Didn't get expected error from custom unmarshal") - } -} - -// Regression test; this caused a panic. 
-func TestRepeatedEnum(t *testing.T) { - pb := new(RepeatedEnum) - if err := UnmarshalText("color: RED", pb); err != nil { - t.Fatal(err) - } - exp := &RepeatedEnum{ - Color: []RepeatedEnum_Color{RepeatedEnum_RED}, - } - if !Equal(pb, exp) { - t.Errorf("Incorrectly populated \nHave: %v\nWant: %v", pb, exp) - } -} - -func TestProto3TextParsing(t *testing.T) { - m := new(proto3pb.Message) - const in = `name: "Wallace" true_scotsman: true` - want := &proto3pb.Message{ - Name: "Wallace", - TrueScotsman: true, - } - if err := UnmarshalText(in, m); err != nil { - t.Fatal(err) - } - if !Equal(m, want) { - t.Errorf("\n got %v\nwant %v", m, want) - } -} - -func TestMapParsing(t *testing.T) { - m := new(MessageWithMap) - const in = `name_mapping:<key:1234 value:"Feist"> name_mapping:<key:1 value:"Beatles">` + - `msg_mapping:<key:-4, value:<f: 2.0>,>` + // separating commas are okay - `msg_mapping<key:-2 value<f: 4.0>>` + // no colon after "value" - `byte_mapping:<key:true value:"so be it">` - want := &MessageWithMap{ - NameMapping: map[int32]string{ - 1: "Beatles", - 1234: "Feist", - }, - MsgMapping: map[int64]*FloatingPoint{ - -4: {F: Float64(2.0)}, - -2: {F: Float64(4.0)}, - }, - ByteMapping: map[bool][]byte{ - true: []byte("so be it"), - }, - } - if err := UnmarshalText(in, m); err != nil { - t.Fatal(err) - } - if !Equal(m, want) { - t.Errorf("\n got %v\nwant %v", m, want) - } -} - -var benchInput string - -func init() { - benchInput = "count: 4\n" - for i := 0; i < 1000; i++ { - benchInput += "pet: \"fido\"\n" - } - - // Check it is valid input. - pb := new(MyMessage) - err := UnmarshalText(benchInput, pb) - if err != nil { - panic("Bad benchmark input: " + err.Error()) - } -} - -func BenchmarkUnmarshalText(b *testing.B) { - pb := new(MyMessage) - for i := 0; i < b.N; i++ { - UnmarshalText(benchInput, pb) - } - b.SetBytes(int64(len(benchInput))) -} diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go deleted file mode 100644 index 64579e94d..000000000 --- a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go +++ /dev/null @@ -1,450 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto_test - -import ( - "bytes" - "errors" - "io/ioutil" - "math" - "strings" - "testing" - - "github.com/golang/protobuf/proto" - - proto3pb "github.com/golang/protobuf/proto/proto3_proto" - pb "github.com/golang/protobuf/proto/testdata" -) - -// textMessage implements the methods that allow it to marshal and unmarshal -// itself as text. -type textMessage struct { -} - -func (*textMessage) MarshalText() ([]byte, error) { - return []byte("custom"), nil -} - -func (*textMessage) UnmarshalText(bytes []byte) error { - if string(bytes) != "custom" { - return errors.New("expected 'custom'") - } - return nil -} - -func (*textMessage) Reset() {} -func (*textMessage) String() string { return "" } -func (*textMessage) ProtoMessage() {} - -func newTestMessage() *pb.MyMessage { - msg := &pb.MyMessage{ - Count: proto.Int32(42), - Name: proto.String("Dave"), - Quote: proto.String(`"I didn't want to go."`), - Pet: []string{"bunny", "kitty", "horsey"}, - Inner: &pb.InnerMessage{ - Host: proto.String("footrest.syd"), - Port: proto.Int32(7001), - Connected: proto.Bool(true), - }, - Others: []*pb.OtherMessage{ - { - Key: proto.Int64(0xdeadbeef), - Value: []byte{1, 65, 7, 12}, - }, - { - Weight: proto.Float32(6.022), - Inner: &pb.InnerMessage{ - Host: proto.String("lesha.mtv"), - Port: proto.Int32(8002), - }, - }, - }, - Bikeshed: pb.MyMessage_BLUE.Enum(), - Somegroup: &pb.MyMessage_SomeGroup{ - GroupField: proto.Int32(8), - }, - // One normally wouldn't do this. - // This is an undeclared tag 13, as a varint (wire type 0) with value 4. - XXX_unrecognized: []byte{13<<3 | 0, 4}, - } - ext := &pb.Ext{ - Data: proto.String("Big gobs for big rats"), - } - if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil { - panic(err) - } - greetings := []string{"adg", "easy", "cow"} - if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil { - panic(err) - } - - // Add an unknown extension. We marshal a pb.Ext, and fake the ID. - b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")}) - if err != nil { - panic(err) - } - b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...) - proto.SetRawExtension(msg, 201, b) - - // Extensions can be plain fields, too, so let's test that. 
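- // (proto.EncodeVarint(202<<3|proto.WireVarint) is the varint-encoded field key: field number 202, wire type 0; the appended 19 is the varint value.)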
- b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19) - proto.SetRawExtension(msg, 202, b) - - return msg -} - -const text = `count: 42 -name: "Dave" -quote: "\"I didn't want to go.\"" -pet: "bunny" -pet: "kitty" -pet: "horsey" -inner: < - host: "footrest.syd" - port: 7001 - connected: true -> -others: < - key: 3735928559 - value: "\001A\007\014" -> -others: < - weight: 6.022 - inner: < - host: "lesha.mtv" - port: 8002 - > -> -bikeshed: BLUE -SomeGroup { - group_field: 8 -} -/* 2 unknown bytes */ -13: 4 -[testdata.Ext.more]: < - data: "Big gobs for big rats" -> -[testdata.greeting]: "adg" -[testdata.greeting]: "easy" -[testdata.greeting]: "cow" -/* 13 unknown bytes */ -201: "\t3G skiing" -/* 3 unknown bytes */ -202: 19 -` - -func TestMarshalText(t *testing.T) { - buf := new(bytes.Buffer) - if err := proto.MarshalText(buf, newTestMessage()); err != nil { - t.Fatalf("proto.MarshalText: %v", err) - } - s := buf.String() - if s != text { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text) - } -} - -func TestMarshalTextCustomMessage(t *testing.T) { - buf := new(bytes.Buffer) - if err := proto.MarshalText(buf, &textMessage{}); err != nil { - t.Fatalf("proto.MarshalText: %v", err) - } - s := buf.String() - if s != "custom" { - t.Errorf("Got %q, expected %q", s, "custom") - } -} -func TestMarshalTextNil(t *testing.T) { - want := "" - tests := []proto.Message{nil, (*pb.MyMessage)(nil)} - for i, test := range tests { - buf := new(bytes.Buffer) - if err := proto.MarshalText(buf, test); err != nil { - t.Fatal(err) - } - if got := buf.String(); got != want { - t.Errorf("%d: got %q want %q", i, got, want) - } - } -} - -func TestMarshalTextUnknownEnum(t *testing.T) { - // The Color enum only specifies values 0-2. - m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()} - got := m.String() - const want = `bikeshed:3 ` - if got != want { - t.Errorf("\n got %q\nwant %q", got, want) - } -} - -func BenchmarkMarshalTextBuffered(b *testing.B) { - buf := new(bytes.Buffer) - m := newTestMessage() - for i := 0; i < b.N; i++ { - buf.Reset() - proto.MarshalText(buf, m) - } -} - -func BenchmarkMarshalTextUnbuffered(b *testing.B) { - w := ioutil.Discard - m := newTestMessage() - for i := 0; i < b.N; i++ { - proto.MarshalText(w, m) - } -} - -func compact(src string) string { - // s/[ \n]+/ /g; s/ $//; - dst := make([]byte, len(src)) - space, comment := false, false - j := 0 - for i := 0; i < len(src); i++ { - if strings.HasPrefix(src[i:], "/*") { - comment = true - i++ - continue - } - if comment && strings.HasPrefix(src[i:], "*/") { - comment = false - i++ - continue - } - if comment { - continue - } - c := src[i] - if c == ' ' || c == '\n' { - space = true - continue - } - if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') { - space = false - } - if c == '{' { - space = false - } - if space { - dst[j] = ' ' - j++ - space = false - } - dst[j] = c - j++ - } - if space { - dst[j] = ' ' - j++ - } - return string(dst[0:j]) -} - -var compactText = compact(text) - -func TestCompactText(t *testing.T) { - s := proto.CompactTextString(newTestMessage()) - if s != compactText { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText) - } -} - -func TestStringEscaping(t *testing.T) { - testCases := []struct { - in *pb.Strings - out string - }{ - { - // Test data from C++ test (TextFormatTest.StringEscape). - // Single divergence: we don't escape apostrophes. 
- &pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")}, - "string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n", - }, - { - // Test data from the same C++ test. - &pb.Strings{StringField: proto.String("\350\260\267\346\255\214")}, - "string_field: \"\\350\\260\\267\\346\\255\\214\"\n", - }, - { - // Some UTF-8. - &pb.Strings{StringField: proto.String("\x00\x01\xff\x81")}, - `string_field: "\000\001\377\201"` + "\n", - }, - } - - for i, tc := range testCases { - var buf bytes.Buffer - if err := proto.MarshalText(&buf, tc.in); err != nil { - t.Errorf("proto.MarshalText: %v", err) - continue - } - s := buf.String() - if s != tc.out { - t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out) - continue - } - - // Check round-trip. - pb := new(pb.Strings) - if err := proto.UnmarshalText(s, pb); err != nil { - t.Errorf("#%d: UnmarshalText: %v", i, err) - continue - } - if !proto.Equal(pb, tc.in) { - t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb) - } - } -} - -// A limitedWriter accepts some output before it fails. -// This is a proxy for something like a nearly-full or imminently-failing disk, -// or a network connection that is about to die. -type limitedWriter struct { - b bytes.Buffer - limit int -} - -var outOfSpace = errors.New("proto: insufficient space") - -func (w *limitedWriter) Write(p []byte) (n int, err error) { - var avail = w.limit - w.b.Len() - if avail <= 0 { - return 0, outOfSpace - } - if len(p) <= avail { - return w.b.Write(p) - } - n, _ = w.b.Write(p[:avail]) - return n, outOfSpace -} - -func TestMarshalTextFailing(t *testing.T) { - // Try lots of different sizes to exercise more error code-paths. - for lim := 0; lim < len(text); lim++ { - buf := new(limitedWriter) - buf.limit = lim - err := proto.MarshalText(buf, newTestMessage()) - // We expect a certain error, but also some partial results in the buffer.
- if err != outOfSpace { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace) - } - s := buf.b.String() - x := text[:buf.limit] - if s != x { - t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x) - } - } -} - -func TestFloats(t *testing.T) { - tests := []struct { - f float64 - want string - }{ - {0, "0"}, - {4.7, "4.7"}, - {math.Inf(1), "inf"}, - {math.Inf(-1), "-inf"}, - {math.NaN(), "nan"}, - } - for _, test := range tests { - msg := &pb.FloatingPoint{F: &test.f} - got := strings.TrimSpace(msg.String()) - want := `f:` + test.want - if got != want { - t.Errorf("f=%f: got %q, want %q", test.f, got, want) - } - } -} - -func TestRepeatedNilText(t *testing.T) { - m := &pb.MessageList{ - Message: []*pb.MessageList_Message{ - nil, - &pb.MessageList_Message{ - Name: proto.String("Horse"), - }, - nil, - }, - } - want := `Message -Message { - name: "Horse" -} -Message -` - if s := proto.MarshalTextString(m); s != want { - t.Errorf(" got: %s\nwant: %s", s, want) - } -} - -func TestProto3Text(t *testing.T) { - tests := []struct { - m proto.Message - want string - }{ - // zero message - {&proto3pb.Message{}, ``}, - // zero message except for an empty byte slice - {&proto3pb.Message{Data: []byte{}}, ``}, - // trivial case - {&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`}, - // empty map - {&pb.MessageWithMap{}, ``}, - // non-empty map; map format is the same as a repeated struct, - // and they are sorted by key (numerically for numeric keys). - { - &pb.MessageWithMap{NameMapping: map[int32]string{ - -1: "Negatory", - 7: "Lucky", - 1234: "Feist", - 6345789: "Otis", - }}, - `name_mapping:<key:-1 value:"Negatory" > ` + - `name_mapping:<key:7 value:"Lucky" > ` + - `name_mapping:<key:1234 value:"Feist" > ` + - `name_mapping:<key:6345789 value:"Otis" >`, - }, - // map with nil value; not well-defined, but we shouldn't crash - { - &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}}, - `msg_mapping:<key:7 >`, - }, - } - for _, test := range tests { - got := strings.TrimSpace(test.m.String()) - if got != test.want { - t.Errorf("\n got %s\nwant %s", got, test.want) - } - } -} diff --git a/Godeps/_workspace/src/github.com/golang/snappy/AUTHORS b/Godeps/_workspace/src/github.com/golang/snappy/AUTHORS deleted file mode 100644 index 824bf2e14..000000000 --- a/Godeps/_workspace/src/github.com/golang/snappy/AUTHORS +++ /dev/null @@ -1,14 +0,0 @@ -# This is the official list of Snappy-Go authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization <email address> -# The email address is not required for organizations. - -# Please keep the list sorted. - -Damian Gryski -Google Inc. -Jan Mercl <0xjnml@gmail.com> -Sebastien Binet diff --git a/Godeps/_workspace/src/github.com/golang/snappy/CONTRIBUTORS b/Godeps/_workspace/src/github.com/golang/snappy/CONTRIBUTORS deleted file mode 100644 index 9f54f21ff..000000000 --- a/Godeps/_workspace/src/github.com/golang/snappy/CONTRIBUTORS +++ /dev/null @@ -1,36 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the Snappy-Go repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address).
-# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# http://code.google.com/legal/individual-cla-v1.0.html -# http://code.google.com/legal/corporate-cla-v1.0.html -# -# The agreement for individuals can be filled out on the web. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name - -# Please keep the list sorted. - -Damian Gryski -Jan Mercl <0xjnml@gmail.com> -Kai Backman -Marc-Antoine Ruel -Nigel Tao -Rob Pike -Russ Cox -Sebastien Binet diff --git a/Godeps/_workspace/src/github.com/golang/snappy/LICENSE b/Godeps/_workspace/src/github.com/golang/snappy/LICENSE deleted file mode 100644 index 6050c10f4..000000000 --- a/Godeps/_workspace/src/github.com/golang/snappy/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/golang/snappy/README b/Godeps/_workspace/src/github.com/golang/snappy/README deleted file mode 100644 index 5074bbab8..000000000 --- a/Godeps/_workspace/src/github.com/golang/snappy/README +++ /dev/null @@ -1,7 +0,0 @@ -The Snappy compression format in the Go programming language. - -To download and install from source: -$ go get github.com/golang/snappy - -Unless otherwise noted, the Snappy-Go source files are distributed -under the BSD-style license found in the LICENSE file. diff --git a/Godeps/_workspace/src/github.com/golang/snappy/decode.go b/Godeps/_workspace/src/github.com/golang/snappy/decode.go deleted file mode 100644 index e7f1259a3..000000000 --- a/Godeps/_workspace/src/github.com/golang/snappy/decode.go +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. - ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// It is valid to pass a nil dst. -func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if len(dst) < dLen { - dst = make([]byte, dLen) - } - - var d, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-1]) - case x == 61: - s += 3 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-2]) | uint(src[s-1])<<8 - case x == 62: - s += 4 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16 - case x == 63: - s += 5 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24 - } - length = int(x + 1) - if length <= 0 { - return nil, errors.New("snappy: unsupported literal length") - } - if length > len(dst)-d || length > len(src)-s { - return nil, ErrCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if s > len(src) { - return nil, ErrCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(src[s-2])&0xe0<<3 | int(src[s-1]) - - case tagCopy2: - s += 3 - if s > len(src) { - return nil, ErrCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(src[s-2]) | int(src[s-1])<<8 - - case tagCopy4: - return nil, errors.New("snappy: unsupported COPY_4 tag") - } - - end := d + length - if offset > d || end > len(dst) { - return nil, ErrCorrupt - } - for ; d < end; d++ { - dst[d] = dst[d-offset] - } - } - if d != dLen { - return nil, ErrCorrupt - } - return dst[:d], nil -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxUncompressedChunkLen), - buf: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes.
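- // Read reports ErrCorrupt for malformed or checksum-mismatched input and ErrUnsupported for chunk types it cannot handle.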
-type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. -func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF { - r.err = ErrCorrupt - } - return false - } - return true -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - for { - if r.i < r.j { - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil - } - if !r.readFull(r.buf[:4]) { - return 0, r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return 0, r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return 0, r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return 0, r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return 0, r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - checksumSize - if !r.readFull(r.decoded[:n]) { - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return 0, r.err - } - if !r.readFull(r.buf[:len(magicBody)]) { - return 0, r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return 0, r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return 0, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). 
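- // Both are skippable: read the chunk body into r.buf and discard it.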
- if !r.readFull(r.buf[:chunkLen]) { - return 0, r.err - } - } -} diff --git a/Godeps/_workspace/src/github.com/golang/snappy/encode.go b/Godeps/_workspace/src/github.com/golang/snappy/encode.go deleted file mode 100644 index f3b5484bc..000000000 --- a/Godeps/_workspace/src/github.com/golang/snappy/encode.go +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "io" -) - -// We limit how far copy back-references can go, the same as the C++ code. -const maxOffset = 1 << 15 - -// emitLiteral writes a literal chunk and returns the number of bytes written. -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - case n < 1<<16: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - case n < 1<<24: - dst[0] = 62<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - dst[3] = uint8(n >> 16) - i = 4 - case int64(n) < 1<<32: - dst[0] = 63<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - dst[3] = uint8(n >> 16) - dst[4] = uint8(n >> 24) - i = 5 - default: - panic("snappy: source buffer is too long") - } - if copy(dst[i:], lit) != len(lit) { - panic("snappy: destination buffer is too short") - } - return i + len(lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -func emitCopy(dst []byte, offset, length int) int { - i := 0 - for length > 0 { - x := length - 4 - if 0 <= x && x < 1<<3 && offset < 1<<11 { - dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - i += 2 - break - } - - x = length - if x > 1<<6 { - x = 1 << 6 - } - dst[i+0] = uint8(x-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= x - } - return i -} - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// It is valid to pass a nil dst. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - // Return early if src is short. - if len(src) <= 4 { - if len(src) != 0 { - d += emitLiteral(dst[d:], src) - } - return dst[:d] - } - - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - const maxTableSize = 1 << 14 - shift, tableSize := uint(32-8), 1<<8 - for tableSize < maxTableSize && tableSize < len(src) { - shift-- - tableSize *= 2 - } - var table [maxTableSize]int - - // Iterate over the source bytes. - var ( - s int // The iterator position. - t int // The last position with the same hash as s. - lit int // The start position of any pending literal bytes. - ) - for s+3 < len(src) { - // Update the hash table. - b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3] - h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24 - p := &table[(h*0x1e35a7bd)>>shift] - // We need to store values in [-1, inf) in table.
To save - // some initialization time, (re)use the table's zero value - // and shift the values against this zero: add 1 on writes, - // subtract 1 on reads. - t, *p = *p-1, s+1 - // If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte. - if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] { - s++ - continue - } - // Otherwise, we have a match. First, emit any pending literal bytes. - if lit != s { - d += emitLiteral(dst[d:], src[lit:s]) - } - // Extend the match to be as long as possible. - s0 := s - s, t = s+4, t+4 - for s < len(src) && src[s] == src[t] { - s++ - t++ - } - // Emit the copied bytes. - d += emitCopy(dst[d:], s-t, s-s0) - lit = s - } - - // Emit any final pending literal bytes and return. - if lit != len(src) { - d += emitLiteral(dst[d:], src[lit:]) - } - return dst[:d] -} - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -func MaxEncodedLen(srcLen int) int { - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - return 32 + srcLen + srcLen/6 -} - -// NewWriter returns a new Writer that compresses to w, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -type Writer struct { - w io.Writer - err error - enc []byte - buf [checksumSize + chunkHeaderSize]byte - wroteHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - w.wroteHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (n int, errRet error) { - if w.err != nil { - return 0, w.err - } - if !w.wroteHeader { - copy(w.enc, magicChunk) - if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil { - w.err = err - return n, err - } - w.wroteHeader = true - } - for len(p) > 0 { - var uncompressed []byte - if len(p) > maxUncompressedChunkLen { - uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%.
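- // (12.5% is 1/8: the compressed chunk is used only if len(chunkBody) < len(uncompressed) - len(uncompressed)/8.)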
- chunkType := uint8(chunkTypeCompressedData) - chunkBody := Encode(w.enc, uncompressed) - if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 { - chunkType, chunkBody = chunkTypeUncompressedData, uncompressed - } - - chunkLen := 4 + len(chunkBody) - w.buf[0] = chunkType - w.buf[1] = uint8(chunkLen >> 0) - w.buf[2] = uint8(chunkLen >> 8) - w.buf[3] = uint8(chunkLen >> 16) - w.buf[4] = uint8(checksum >> 0) - w.buf[5] = uint8(checksum >> 8) - w.buf[6] = uint8(checksum >> 16) - w.buf[7] = uint8(checksum >> 24) - if _, err := w.w.Write(w.buf[:]); err != nil { - w.err = err - return n, err - } - if _, err := w.w.Write(chunkBody); err != nil { - w.err = err - return n, err - } - n += len(uncompressed) - } - return n, nil -} diff --git a/Godeps/_workspace/src/github.com/golang/snappy/snappy.go b/Godeps/_workspace/src/github.com/golang/snappy/snappy.go deleted file mode 100644 index e98653acf..000000000 --- a/Godeps/_workspace/src/github.com/golang/snappy/snappy.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snappy implements the snappy block-based compression format. -// It aims for very high speeds and reasonable compression. -// -// The C++ snappy implementation is at https://github.com/google/snappy -package snappy - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer supported. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 bytes". 
- maxUncompressedChunkLen = 65536 -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/Godeps/_workspace/src/github.com/golang/snappy/snappy_test.go b/Godeps/_workspace/src/github.com/golang/snappy/snappy_test.go deleted file mode 100644 index f8188f11e..000000000 --- a/Godeps/_workspace/src/github.com/golang/snappy/snappy_test.go +++ /dev/null @@ -1,377 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "bytes" - "flag" - "fmt" - "io" - "io/ioutil" - "math/rand" - "net/http" - "os" - "path/filepath" - "strings" - "testing" -) - -var ( - download = flag.Bool("download", false, "If true, download any missing files before running benchmarks") - testdata = flag.String("testdata", "testdata", "Directory containing the test data") -) - -func roundtrip(b, ebuf, dbuf []byte) error { - d, err := Decode(dbuf, Encode(ebuf, b)) - if err != nil { - return fmt.Errorf("decoding error: %v", err) - } - if !bytes.Equal(b, d) { - return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot %v", b, d) - } - return nil -} - -func TestEmpty(t *testing.T) { - if err := roundtrip(nil, nil, nil); err != nil { - t.Fatal(err) - } -} - -func TestSmallCopy(t *testing.T) { - for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { - for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { - for i := 0; i < 32; i++ { - s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb" - if err := roundtrip([]byte(s), ebuf, dbuf); err != nil { - t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err) - } - } - } - } -} - -func TestSmallRand(t *testing.T) { - rng := rand.New(rand.NewSource(27354294)) - for n := 1; n < 20000; n += 23 { - b := make([]byte, n) - for i := range b { - b[i] = uint8(rng.Uint32()) - } - if err := roundtrip(b, nil, nil); err != nil { - t.Fatal(err) - } - } -} - -func TestSmallRegular(t *testing.T) { - for n := 1; n < 20000; n += 23 { - b := make([]byte, n) - for i := range b { - b[i] = uint8(i%10 + 'a') - } - if err := roundtrip(b, nil, nil); err != nil { - t.Fatal(err) - } - } -} - -func TestInvalidVarint(t *testing.T) { - data := []byte("\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00") - if _, err := DecodedLen(data); err != ErrCorrupt { - t.Errorf("DecodedLen: got %v, want ErrCorrupt", err) - } - if _, err := Decode(nil, data); err != ErrCorrupt { - t.Errorf("Decode: got %v, want ErrCorrupt", err) - } - - // The encoded varint overflows 32 bits - data = []byte("\xff\xff\xff\xff\xff\x00") - - if _, err := DecodedLen(data); err != ErrCorrupt { - t.Errorf("DecodedLen: got %v, want ErrCorrupt", err) - } - if _, err := Decode(nil, data); err != ErrCorrupt { - t.Errorf("Decode: got %v, want ErrCorrupt", err) - } -} - -func cmp(a, b []byte) error { - if len(a) != len(b) { - return fmt.Errorf("got %d bytes, want %d", len(a), len(b)) - } - for i := range a { - if a[i] != b[i] { - return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i]) - } - } - return nil -} - -func TestFramingFormat(t *testing.T) { - // src is 
comprised of alternating 1e5-sized sequences of random - // (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen - // because it is larger than maxUncompressedChunkLen (64k). - src := make([]byte, 1e6) - rng := rand.New(rand.NewSource(1)) - for i := 0; i < 10; i++ { - if i%2 == 0 { - for j := 0; j < 1e5; j++ { - src[1e5*i+j] = uint8(rng.Intn(256)) - } - } else { - for j := 0; j < 1e5; j++ { - src[1e5*i+j] = uint8(i) - } - } - } - - buf := new(bytes.Buffer) - if _, err := NewWriter(buf).Write(src); err != nil { - t.Fatalf("Write: encoding: %v", err) - } - dst, err := ioutil.ReadAll(NewReader(buf)) - if err != nil { - t.Fatalf("ReadAll: decoding: %v", err) - } - if err := cmp(dst, src); err != nil { - t.Fatal(err) - } -} - -func TestReaderReset(t *testing.T) { - gold := bytes.Repeat([]byte("All that is gold does not glitter,\n"), 10000) - buf := new(bytes.Buffer) - if _, err := NewWriter(buf).Write(gold); err != nil { - t.Fatalf("Write: %v", err) - } - encoded, invalid, partial := buf.String(), "invalid", "partial" - r := NewReader(nil) - for i, s := range []string{encoded, invalid, partial, encoded, partial, invalid, encoded, encoded} { - if s == partial { - r.Reset(strings.NewReader(encoded)) - if _, err := r.Read(make([]byte, 101)); err != nil { - t.Errorf("#%d: %v", i, err) - continue - } - continue - } - r.Reset(strings.NewReader(s)) - got, err := ioutil.ReadAll(r) - switch s { - case encoded: - if err != nil { - t.Errorf("#%d: %v", i, err) - continue - } - if err := cmp(got, gold); err != nil { - t.Errorf("#%d: %v", i, err) - continue - } - case invalid: - if err == nil { - t.Errorf("#%d: got nil error, want non-nil", i) - continue - } - } - } -} - -func TestWriterReset(t *testing.T) { - gold := bytes.Repeat([]byte("Not all those who wander are lost;\n"), 10000) - var gots, wants [][]byte - const n = 20 - w, failed := NewWriter(nil), false - for i := 0; i <= n; i++ { - buf := new(bytes.Buffer) - w.Reset(buf) - want := gold[:len(gold)*i/n] - if _, err := w.Write(want); err != nil { - t.Errorf("#%d: Write: %v", i, err) - failed = true - continue - } - got, err := ioutil.ReadAll(NewReader(buf)) - if err != nil { - t.Errorf("#%d: ReadAll: %v", i, err) - failed = true - continue - } - gots = append(gots, got) - wants = append(wants, want) - } - if failed { - return - } - for i := range gots { - if err := cmp(gots[i], wants[i]); err != nil { - t.Errorf("#%d: %v", i, err) - } - } -} - -func benchDecode(b *testing.B, src []byte) { - encoded := Encode(nil, src) - // Bandwidth is in amount of uncompressed data. - b.SetBytes(int64(len(src))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - Decode(src, encoded) - } -} - -func benchEncode(b *testing.B, src []byte) { - // Bandwidth is in amount of uncompressed data. - b.SetBytes(int64(len(src))) - dst := make([]byte, MaxEncodedLen(len(src))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - Encode(dst, src) - } -} - -func readFile(b testing.TB, filename string) []byte { - src, err := ioutil.ReadFile(filename) - if err != nil { - b.Skipf("skipping benchmark: %v", err) - } - if len(src) == 0 { - b.Fatalf("%s has zero length", filename) - } - return src -} - -// expand returns a slice of length n containing repeated copies of src. 
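- // The final copy is truncated when n is not a multiple of len(src).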
-func expand(src []byte, n int) []byte { - dst := make([]byte, n) - for x := dst; len(x) > 0; { - i := copy(x, src) - x = x[i:] - } - return dst -} - -func benchWords(b *testing.B, n int, decode bool) { - // Note: the file is OS-language dependent so the resulting values are not - // directly comparable for non-US-English OS installations. - data := expand(readFile(b, "/usr/share/dict/words"), n) - if decode { - benchDecode(b, data) - } else { - benchEncode(b, data) - } -} - -func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) } -func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) } -func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) } -func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) } -func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) } -func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) } -func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) } -func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) } - -// testFiles' values are copied directly from -// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc -// The label field is unused in snappy-go. -var testFiles = []struct { - label string - filename string -}{ - {"html", "html"}, - {"urls", "urls.10K"}, - {"jpg", "fireworks.jpeg"}, - {"jpg_200", "fireworks.jpeg"}, - {"pdf", "paper-100k.pdf"}, - {"html4", "html_x_4"}, - {"txt1", "alice29.txt"}, - {"txt2", "asyoulik.txt"}, - {"txt3", "lcet10.txt"}, - {"txt4", "plrabn12.txt"}, - {"pb", "geo.protodata"}, - {"gaviota", "kppkn.gtb"}, -} - -// The test data files are present at this canonical URL. -const baseURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/" - -func downloadTestdata(b *testing.B, basename string) (errRet error) { - filename := filepath.Join(*testdata, basename) - if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 { - return nil - } - - if !*download { - b.Skipf("test data not found; skipping benchmark without the -download flag") - } - // Download the official snappy C++ implementation reference test data - // files for benchmarking. - if err := os.Mkdir(*testdata, 0777); err != nil && !os.IsExist(err) { - return fmt.Errorf("failed to create testdata: %s", err) - } - - f, err := os.Create(filename) - if err != nil { - return fmt.Errorf("failed to create %s: %s", filename, err) - } - defer f.Close() - defer func() { - if errRet != nil { - os.Remove(filename) - } - }() - url := baseURL + basename - resp, err := http.Get(url) - if err != nil { - return fmt.Errorf("failed to download %s: %s", url, err) - } - defer resp.Body.Close() - if s := resp.StatusCode; s != http.StatusOK { - return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s)) - } - _, err = io.Copy(f, resp.Body) - if err != nil { - return fmt.Errorf("failed to download %s to %s: %s", url, filename, err) - } - return nil -} - -func benchFile(b *testing.B, n int, decode bool) { - if err := downloadTestdata(b, testFiles[n].filename); err != nil { - b.Fatalf("failed to download testdata: %s", err) - } - data := readFile(b, filepath.Join(*testdata, testFiles[n].filename)) - if decode { - benchDecode(b, data) - } else { - benchEncode(b, data) - } -} - -// Naming convention is kept similar to what snappy's C++ implementation uses. 
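- // UFlat benchmarks decode (uncompress) the numbered testFiles entry; ZFlat benchmarks encode (compress) it.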
-func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) } -func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) } -func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) } -func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) } -func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) } -func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) } -func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) } -func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) } -func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) } -func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) } -func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) } -func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) } -func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) } -func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) } -func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) } -func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) } -func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) } -func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) } -func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) } -func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) } -func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) } -func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) } -func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) } -func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) } diff --git a/Godeps/_workspace/src/github.com/gonuts/go-shellquote/README b/Godeps/_workspace/src/github.com/gonuts/go-shellquote/README deleted file mode 100644 index 4d34e87af..000000000 --- a/Godeps/_workspace/src/github.com/gonuts/go-shellquote/README +++ /dev/null @@ -1,36 +0,0 @@ -PACKAGE - -package shellquote - import "github.com/kballard/go-shellquote" - - Shellquote provides utilities for joining/splitting strings using sh's - word-splitting rules. - -VARIABLES - -var ( - UnterminatedSingleQuoteError = errors.New("Unterminated single-quoted string") - UnterminatedDoubleQuoteError = errors.New("Unterminated double-quoted string") - UnterminatedEscapeError = errors.New("Unterminated backslash-escape") -) - - -FUNCTIONS - -func Join(args ...string) string - Join quotes each argument and joins them with a space. If passed to - /bin/sh, the resulting string will be split back into the original - arguments. - -func Split(input string) (words []string, err error) - Split splits a string according to /bin/sh's word-splitting rules. It - supports backslash-escapes, single-quotes, and double-quotes. Notably it - does not support the $'' style of quoting. It also doesn't attempt to - perform any other sort of expansion, including brace expansion, shell - expansion, or pathname expansion. - - If the given input has an unterminated quoted string or ends in a - backslash-escape, one of UnterminatedSingleQuoteError, - UnterminatedDoubleQuoteError, or UnterminatedEscapeError is returned. 
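(Editorial sketch, not part of the vendored README: a minimal round-trip of Split and Join under the canonical import path named above; the command string and printed outputs are illustrative assumptions only.)

    package main

    import (
    	"fmt"

    	shellquote "github.com/kballard/go-shellquote"
    )

    func main() {
    	// Split applies sh word-splitting: double quotes group words,
    	// and a backslash escapes the following space.
    	words, err := shellquote.Split(`cp "my file.txt" dest\ dir/`)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("%q\n", words) // ["cp" "my file.txt" "dest dir/"]

    	// Join quotes each argument so that sh would split the result
    	// back into the original arguments.
    	fmt.Println(shellquote.Join("hello world", "it's")) // 'hello world' it\'s
    }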
- - diff --git a/Godeps/_workspace/src/github.com/gonuts/go-shellquote/both_test.go b/Godeps/_workspace/src/github.com/gonuts/go-shellquote/both_test.go deleted file mode 100644 index 9cba3c849..000000000 --- a/Godeps/_workspace/src/github.com/gonuts/go-shellquote/both_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package shellquote - -import ( - "reflect" - "testing" - "testing/quick" -) - -// this is called bothtest because it tests Split and Join together - -func TestJoinSplit(t *testing.T) { - f := func(strs []string) bool { - // Join, then split, the input - combined := Join(strs...) - split, err := Split(combined) - if err != nil { - t.Logf("Error splitting %#v: %v", combined, err) - return false - } - if !reflect.DeepEqual(strs, split) { - t.Logf("Input %q did not match output %q", strs, split) - return false - } - return true - } - if err := quick.Check(f, nil); err != nil { - t.Error(err) - } -} diff --git a/Godeps/_workspace/src/github.com/gonuts/go-shellquote/doc.go b/Godeps/_workspace/src/github.com/gonuts/go-shellquote/doc.go deleted file mode 100644 index 9445fa4ad..000000000 --- a/Godeps/_workspace/src/github.com/gonuts/go-shellquote/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Shellquote provides utilities for joining/splitting strings using sh's -// word-splitting rules. -package shellquote diff --git a/Godeps/_workspace/src/github.com/gonuts/go-shellquote/quote.go b/Godeps/_workspace/src/github.com/gonuts/go-shellquote/quote.go deleted file mode 100644 index f6cacee0f..000000000 --- a/Godeps/_workspace/src/github.com/gonuts/go-shellquote/quote.go +++ /dev/null @@ -1,102 +0,0 @@ -package shellquote - -import ( - "bytes" - "strings" - "unicode/utf8" -) - -// Join quotes each argument and joins them with a space. -// If passed to /bin/sh, the resulting string will be split back into the -// original arguments. -func Join(args ...string) string { - var buf bytes.Buffer - for i, arg := range args { - if i != 0 { - buf.WriteByte(' ') - } - quote(arg, &buf) - } - return buf.String() -} - -const ( - specialChars = "\\'\"`${[|&;<>()*?!" - extraSpecialChars = " \t\n" - prefixChars = "~" -) - -func quote(word string, buf *bytes.Buffer) { - // We want to try to produce a "nice" output. As such, we will - // backslash-escape most characters, but if we encounter a space, or if we - // encounter an extra-special char (which doesn't work with - // backslash-escaping) we switch over to quoting the whole word. We do this - // with a space because it's typically easier for people to read multi-word - // arguments when quoted with a space rather than with ugly backslashes - // everywhere. 
- origLen := buf.Len() - - if len(word) == 0 { - // oops, no content - buf.WriteString("''") - return - } - - cur, prev := word, word - atStart := true - for len(cur) > 0 { - c, l := utf8.DecodeRuneInString(cur) - cur = cur[l:] - if strings.ContainsRune(specialChars, c) || (atStart && strings.ContainsRune(prefixChars, c)) { - // copy the non-special chars up to this point - if len(cur) < len(prev) { - buf.WriteString(word[0 : len(prev)-len(cur)-l]) - } - buf.WriteByte('\\') - buf.WriteRune(c) - prev = cur - } else if strings.ContainsRune(extraSpecialChars, c) { - // start over in quote mode - buf.Truncate(origLen) - goto quote - } - atStart = false - } - if len(prev) > 0 { - buf.WriteString(prev) - } - return - -quote: - // quote mode - // Use single-quotes, but if we find a single-quote in the word, we need - // to terminate the string, emit an escaped quote, and start the string up - // again - inQuote := false - for len(word) > 0 { - i := strings.IndexRune(word, '\'') - if i == -1 { - break - } - if i > 0 { - if !inQuote { - buf.WriteByte('\'') - inQuote = true - } - buf.WriteString(word[0:i]) - word = word[i+1:] - } - if inQuote { - buf.WriteByte('\'') - inQuote = false - } - buf.WriteString("\\'") - } - if len(word) > 0 { - if !inQuote { - buf.WriteByte('\'') - } - buf.WriteString(word) - buf.WriteByte('\'') - } -} diff --git a/Godeps/_workspace/src/github.com/gonuts/go-shellquote/quote_test.go b/Godeps/_workspace/src/github.com/gonuts/go-shellquote/quote_test.go deleted file mode 100644 index a4d2d82fb..000000000 --- a/Godeps/_workspace/src/github.com/gonuts/go-shellquote/quote_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package shellquote - -import ( - "testing" -) - -func TestSimpleJoin(t *testing.T) { - for _, elem := range simpleJoinTest { - output := Join(elem.input...) - if output != elem.output { - t.Errorf("Input %q, got %q, expected %q", elem.input, output, elem.output) - } - } -} - -var simpleJoinTest = []struct { - input []string - output string -}{ - {[]string{"test"}, "test"}, - {[]string{"hello goodbye"}, "'hello goodbye'"}, - {[]string{"hello", "goodbye"}, "hello goodbye"}, - {[]string{"don't you know the dewey decimal system?"}, "'don'\\''t you know the dewey decimal system?'"}, - {[]string{"don't", "you", "know", "the", "dewey", "decimal", "system?"}, "don\\'t you know the dewey decimal system\\?"}, - {[]string{"~user", "u~ser", " ~user", "!~user"}, "\\~user u~ser ' ~user' \\!~user"}, - {[]string{"foo*", "M{ovies,usic}", "ab[cd]", "%3"}, "foo\\* M\\{ovies,usic} ab\\[cd] %3"}, - {[]string{"one", "", "three"}, "one '' three"}, -} diff --git a/Godeps/_workspace/src/github.com/gonuts/go-shellquote/unquote.go b/Godeps/_workspace/src/github.com/gonuts/go-shellquote/unquote.go deleted file mode 100644 index ba3a0f227..000000000 --- a/Godeps/_workspace/src/github.com/gonuts/go-shellquote/unquote.go +++ /dev/null @@ -1,144 +0,0 @@ -package shellquote - -import ( - "bytes" - "errors" - "strings" - "unicode/utf8" -) - -var ( - UnterminatedSingleQuoteError = errors.New("Unterminated single-quoted string") - UnterminatedDoubleQuoteError = errors.New("Unterminated double-quoted string") - UnterminatedEscapeError = errors.New("Unterminated backslash-escape") -) - -var ( - splitChars = " \n\t" - singleChar = '\'' - doubleChar = '"' - escapeChar = '\\' - doubleEscapeChars = "$`\"\n\\" -) - -// Split splits a string according to /bin/sh's word-splitting rules. It -// supports backslash-escapes, single-quotes, and double-quotes. Notably it does -// not support the $'' style of quoting. 
It also doesn't attempt to perform any -// other sort of expansion, including brace expansion, shell expansion, or -// pathname expansion. -// -// If the given input has an unterminated quoted string or ends in a -// backslash-escape, one of UnterminatedSingleQuoteError, -// UnterminatedDoubleQuoteError, or UnterminatedEscapeError is returned. -func Split(input string) (words []string, err error) { - var buf bytes.Buffer - words = make([]string, 0) - - for len(input) > 0 { - // skip any splitChars at the start - c, l := utf8.DecodeRuneInString(input) - if strings.ContainsRune(splitChars, c) { - input = input[l:] - continue - } - - var word string - word, input, err = splitWord(input, &buf) - if err != nil { - return - } - words = append(words, word) - } - return -} - -func splitWord(input string, buf *bytes.Buffer) (word string, remainder string, err error) { - buf.Reset() - -raw: - { - cur := input - for len(cur) > 0 { - c, l := utf8.DecodeRuneInString(cur) - cur = cur[l:] - if c == singleChar { - buf.WriteString(input[0 : len(input)-len(cur)-l]) - input = cur - goto single - } else if c == doubleChar { - buf.WriteString(input[0 : len(input)-len(cur)-l]) - input = cur - goto double - } else if c == escapeChar { - buf.WriteString(input[0 : len(input)-len(cur)-l]) - input = cur - goto escape - } else if strings.ContainsRune(splitChars, c) { - buf.WriteString(input[0 : len(input)-len(cur)-l]) - return buf.String(), cur, nil - } - } - if len(input) > 0 { - buf.WriteString(input) - input = "" - } - goto done - } - -escape: - { - if len(input) == 0 { - return "", "", UnterminatedEscapeError - } - c, l := utf8.DecodeRuneInString(input) - if c == '\n' { - // a backslash-escaped newline is elided from the output entirely - } else { - buf.WriteString(input[:l]) - } - input = input[l:] - } - goto raw - -single: - { - i := strings.IndexRune(input, singleChar) - if i == -1 { - return "", "", UnterminatedSingleQuoteError - } - buf.WriteString(input[0:i]) - input = input[i+1:] - goto raw - } - -double: - { - cur := input - for len(cur) > 0 { - c, l := utf8.DecodeRuneInString(cur) - cur = cur[l:] - if c == doubleChar { - buf.WriteString(input[0 : len(input)-len(cur)-l]) - input = cur - goto raw - } else if c == escapeChar { - // bash only supports certain escapes in double-quoted strings - c2, l2 := utf8.DecodeRuneInString(cur) - cur = cur[l2:] - if strings.ContainsRune(doubleEscapeChars, c2) { - buf.WriteString(input[0 : len(input)-len(cur)-l-l2]) - if c2 == '\n' { - // newline is special, skip the backslash entirely - } else { - buf.WriteRune(c2) - } - input = cur - } - } - } - return "", "", UnterminatedDoubleQuoteError - } - -done: - return buf.String(), input, nil -} diff --git a/Godeps/_workspace/src/github.com/gonuts/go-shellquote/unquote_test.go b/Godeps/_workspace/src/github.com/gonuts/go-shellquote/unquote_test.go deleted file mode 100644 index 32ea5144b..000000000 --- a/Godeps/_workspace/src/github.com/gonuts/go-shellquote/unquote_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package shellquote - -import ( - "reflect" - "testing" -) - -func TestSimpleSplit(t *testing.T) { - for _, elem := range simpleSplitTest { - output, err := Split(elem.input) - if err != nil { - t.Errorf("Input %q, got error %#v", elem.input, err) - } else if !reflect.DeepEqual(output, elem.output) { - t.Errorf("Input %q, got %q, expected %q", elem.input, output, elem.output) - } - } -} - -func TestErrorSplit(t *testing.T) { - for _, elem := range errorSplitTest { - _, err := Split(elem.input) - if err != elem.error { - 
t.Errorf("Input %q, got error %#v, expected error %#v", elem.input, err, elem.error) - } - } -} - -var simpleSplitTest = []struct { - input string - output []string -}{ - {"hello", []string{"hello"}}, - {"hello goodbye", []string{"hello", "goodbye"}}, - {"hello goodbye", []string{"hello", "goodbye"}}, - {"glob* test?", []string{"glob*", "test?"}}, - {"don\\'t you know the dewey decimal system\\?", []string{"don't", "you", "know", "the", "dewey", "decimal", "system?"}}, - {"'don'\\''t you know the dewey decimal system?'", []string{"don't you know the dewey decimal system?"}}, - {"one '' two", []string{"one", "", "two"}}, - {"text with\\\na newline", []string{"text", "witha", "newline"}}, - {"\"quoted\\d\\\\\\\" text with a\\\nnewline\"", []string{"quoted\\d\\\" text with anewline"}}, - {"foo\"bar\"baz", []string{"foobarbaz"}}, -} - -var errorSplitTest = []struct { - input string - error error -}{ - {"don't worry", UnterminatedSingleQuoteError}, - {"'test'\\''ing", UnterminatedSingleQuoteError}, - {"\"foo'bar", UnterminatedDoubleQuoteError}, - {"foo\\", UnterminatedEscapeError}, -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/0doc.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/0doc.go deleted file mode 100644 index c14d810a7..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/0doc.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -/* -High Performance, Feature-Rich Idiomatic Go encoding library for msgpack and binc . - -Supported Serialization formats are: - - - msgpack: [https://github.com/msgpack/msgpack] - - binc: [http://github.com/ugorji/binc] - -To install: - - go get github.com/ugorji/go/codec - -The idiomatic Go support is as seen in other encoding packages in -the standard library (ie json, xml, gob, etc). - -Rich Feature Set includes: - - - Simple but extremely powerful and feature-rich API - - Very High Performance. - Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X. - This was achieved by taking extreme care on: - - managing allocation - - function frame size (important due to Go's use of split stacks), - - reflection use (and by-passing reflection for common types) - - recursion implications - - zero-copy mode (encoding/decoding to byte slice without using temp buffers) - - Correct. - Care was taken to precisely handle corner cases like: - overflows, nil maps and slices, nil value in stream, etc. - - Efficient zero-copying into temporary byte buffers - when encoding into or decoding from a byte slice. - - Standard field renaming via tags - - Encoding from any value - (struct, slice, map, primitives, pointers, interface{}, etc) - - Decoding into pointer to any non-nil typed value - (struct, slice, map, int, float32, bool, string, reflect.Value, etc) - - Supports extension functions to handle the encode/decode of custom types - - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler - - Schema-less decoding - (decode into a pointer to a nil interface{} as opposed to a typed non-nil value). - Includes Options to configure what specific map or slice type to use - when decoding an encoded list or map into a nil interface{} - - Provides a RPC Server and Client Codec for net/rpc communication protocol. 
- - Msgpack Specific: - - Provides extension functions to handle spec-defined extensions (binary, timestamp) - - Options to resolve ambiguities in handling raw bytes (as string or []byte) - during schema-less decoding (decoding into a nil interface{}) - - RPC Server/Client Codec for msgpack-rpc protocol defined at: - https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md - - Fast Paths for some container types: - For some container types, we circumvent reflection and its associated overhead - and allocation costs, and encode/decode directly. These types are: - []interface{} - []int - []string - map[interface{}]interface{} - map[int]interface{} - map[string]interface{} - -Extension Support - -Users can register a function to handle the encoding or decoding of -their custom types. - -There are no restrictions on what the custom type can be. Some examples: - - type BisSet []int - type BitSet64 uint64 - type UUID string - type MyStructWithUnexportedFields struct { a int; b bool; c []int; } - type GifImage struct { ... } - -As an illustration, MyStructWithUnexportedFields would normally be -encoded as an empty map because it has no exported fields, while UUID -would be encoded as a string. However, with extension support, you can -encode any of these however you like. - -RPC - -RPC Client and Server Codecs are implemented, so the codecs can be used -with the standard net/rpc package. - -Usage - -Typical usage model: - - // create and configure Handle - var ( - bh codec.BincHandle - mh codec.MsgpackHandle - ) - - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - // configure extensions - // e.g. for msgpack, define functions and enable Time support for tag 1 - // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn) - - // create and use decoder/encoder - var ( - r io.Reader - w io.Writer - b []byte - h = &bh // or mh to use msgpack - ) - - dec = codec.NewDecoder(r, h) - dec = codec.NewDecoderBytes(b, h) - err = dec.Decode(&v) - - enc = codec.NewEncoder(w, h) - enc = codec.NewEncoderBytes(&b, h) - err = enc.Encode(v) - - //RPC Server - go func() { - for { - conn, err := listener.Accept() - rpcCodec := codec.GoRpc.ServerCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) - rpc.ServeCodec(rpcCodec) - } - }() - - //RPC Communication (client side) - conn, err = net.Dial("tcp", "localhost:5555") - rpcCodec := codec.GoRpc.ClientCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) - client := rpc.NewClientWithCodec(rpcCodec) - -Representative Benchmark Results - -Run the benchmark suite using: - go test -bi -bench=. -benchmem - -To run full benchmark suite (including against vmsgpack and bson), -see notes in ext_dep_test.go - -*/ -package codec diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/README.md b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/README.md deleted file mode 100644 index 6c95d1bfd..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/README.md +++ /dev/null @@ -1,174 +0,0 @@ -# Codec - -High Performance and Feature-Rich Idiomatic Go Library providing -encode/decode support for different serialization formats. 
- -Supported Serialization formats are: - - - msgpack: [https://github.com/msgpack/msgpack] - - binc: [http://github.com/ugorji/binc] - -To install: - - go get github.com/ugorji/go/codec - -Online documentation: [http://godoc.org/github.com/ugorji/go/codec] - -The idiomatic Go support is as seen in other encoding packages in -the standard library (ie json, xml, gob, etc). - -Rich Feature Set includes: - - - Simple but extremely powerful and feature-rich API - - Very High Performance. - Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X. - This was achieved by taking extreme care on: - - managing allocation - - function frame size (important due to Go's use of split stacks), - - reflection use (and by-passing reflection for common types) - - recursion implications - - zero-copy mode (encoding/decoding to byte slice without using temp buffers) - - Correct. - Care was taken to precisely handle corner cases like: - overflows, nil maps and slices, nil value in stream, etc. - - Efficient zero-copying into temporary byte buffers - when encoding into or decoding from a byte slice. - - Standard field renaming via tags - - Encoding from any value - (struct, slice, map, primitives, pointers, interface{}, etc) - - Decoding into pointer to any non-nil typed value - (struct, slice, map, int, float32, bool, string, reflect.Value, etc) - - Supports extension functions to handle the encode/decode of custom types - - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler - - Schema-less decoding - (decode into a pointer to a nil interface{} as opposed to a typed non-nil value). - Includes Options to configure what specific map or slice type to use - when decoding an encoded list or map into a nil interface{} - - Provides a RPC Server and Client Codec for net/rpc communication protocol. - - Msgpack Specific: - - Provides extension functions to handle spec-defined extensions (binary, timestamp) - - Options to resolve ambiguities in handling raw bytes (as string or []byte) - during schema-less decoding (decoding into a nil interface{}) - - RPC Server/Client Codec for msgpack-rpc protocol defined at: - https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md - - Fast Paths for some container types: - For some container types, we circumvent reflection and its associated overhead - and allocation costs, and encode/decode directly. These types are: - []interface{} - []int - []string - map[interface{}]interface{} - map[int]interface{} - map[string]interface{} - -## Extension Support - -Users can register a function to handle the encoding or decoding of -their custom types. - -There are no restrictions on what the custom type can be. Some examples: - - type BisSet []int - type BitSet64 uint64 - type UUID string - type MyStructWithUnexportedFields struct { a int; b bool; c []int; } - type GifImage struct { ... } - -As an illustration, MyStructWithUnexportedFields would normally be -encoded as an empty map because it has no exported fields, while UUID -would be encoded as a string. However, with extension support, you can -encode any of these however you like. - -## RPC - -RPC Client and Server Codecs are implemented, so the codecs can be used -with the standard net/rpc package. - -## Usage - -Typical usage model: - - // create and configure Handle - var ( - bh codec.BincHandle - mh codec.MsgpackHandle - ) - - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - // configure extensions - // e.g. 
for msgpack, define functions and enable Time support for tag 1 - // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn) - - // create and use decoder/encoder - var ( - r io.Reader - w io.Writer - b []byte - h = &bh // or mh to use msgpack - ) - - dec = codec.NewDecoder(r, h) - dec = codec.NewDecoderBytes(b, h) - err = dec.Decode(&v) - - enc = codec.NewEncoder(w, h) - enc = codec.NewEncoderBytes(&b, h) - err = enc.Encode(v) - - //RPC Server - go func() { - for { - conn, err := listener.Accept() - rpcCodec := codec.GoRpc.ServerCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) - rpc.ServeCodec(rpcCodec) - } - }() - - //RPC Communication (client side) - conn, err = net.Dial("tcp", "localhost:5555") - rpcCodec := codec.GoRpc.ClientCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) - client := rpc.NewClientWithCodec(rpcCodec) - -## Representative Benchmark Results - -A sample run of benchmark using "go test -bi -bench=. -benchmem": - - /proc/cpuinfo: Intel(R) Core(TM) i7-2630QM CPU @ 2.00GHz (HT) - - .............................................. - BENCHMARK INIT: 2013-10-16 11:02:50.345970786 -0400 EDT - To run full benchmark comparing encodings (MsgPack, Binc, JSON, GOB, etc), use: "go test -bench=." - Benchmark: - Struct recursive Depth: 1 - ApproxDeepSize Of benchmark Struct: 4694 bytes - Benchmark One-Pass Run: - v-msgpack: len: 1600 bytes - bson: len: 3025 bytes - msgpack: len: 1560 bytes - binc: len: 1187 bytes - gob: len: 1972 bytes - json: len: 2538 bytes - .............................................. - PASS - Benchmark__Msgpack____Encode 50000 54359 ns/op 14953 B/op 83 allocs/op - Benchmark__Msgpack____Decode 10000 106531 ns/op 14990 B/op 410 allocs/op - Benchmark__Binc_NoSym_Encode 50000 53956 ns/op 14966 B/op 83 allocs/op - Benchmark__Binc_NoSym_Decode 10000 103751 ns/op 14529 B/op 386 allocs/op - Benchmark__Binc_Sym___Encode 50000 65961 ns/op 17130 B/op 88 allocs/op - Benchmark__Binc_Sym___Decode 10000 106310 ns/op 15857 B/op 287 allocs/op - Benchmark__Gob________Encode 10000 135944 ns/op 21189 B/op 237 allocs/op - Benchmark__Gob________Decode 5000 405390 ns/op 83460 B/op 1841 allocs/op - Benchmark__Json_______Encode 20000 79412 ns/op 13874 B/op 102 allocs/op - Benchmark__Json_______Decode 10000 247979 ns/op 14202 B/op 493 allocs/op - Benchmark__Bson_______Encode 10000 121762 ns/op 27814 B/op 514 allocs/op - Benchmark__Bson_______Decode 10000 162126 ns/op 16514 B/op 789 allocs/op - Benchmark__VMsgpack___Encode 50000 69155 ns/op 12370 B/op 344 allocs/op - Benchmark__VMsgpack___Decode 10000 151609 ns/op 20307 B/op 571 allocs/op - ok ugorji.net/codec 30.827s - -To run full benchmark suite (including against vmsgpack and bson), -see notes in ext\_dep\_test.go - diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/bench_test.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/bench_test.go deleted file mode 100644 index 4d437035e..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/bench_test.go +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "flag" - "fmt" - "reflect" - "runtime" - "testing" - "time" -) - -// Sample way to run: -// go test -bi -bv -bd=1 -benchmem -bench=. 
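-// The custom flags map to benchInitFlags below: -bi runs the one-pass
-// init benchmark, -bv verifies each decoded value during benchmarks,
-// -bd sets the recursive struct depth (depth >1 can be unreliable due
-// to stack growth), and -bu prints unscientific one-pass timings.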
- -var ( - _ = fmt.Printf - benchTs *TestStruc - - approxSize int - - benchDoInitBench bool - benchVerify bool - benchUnscientificRes bool = false - //depth of 0 maps to ~400bytes json-encoded string, 1 maps to ~1400 bytes, etc - //For depth>1, we likely trigger stack growth for encoders, making benchmarking unreliable. - benchDepth int - benchInitDebug bool - benchCheckers []benchChecker -) - -type benchEncFn func(interface{}) ([]byte, error) -type benchDecFn func([]byte, interface{}) error -type benchIntfFn func() interface{} - -type benchChecker struct { - name string - encodefn benchEncFn - decodefn benchDecFn -} - -func benchInitFlags() { - flag.BoolVar(&benchInitDebug, "bg", false, "Bench Debug") - flag.IntVar(&benchDepth, "bd", 1, "Bench Depth: If >1, potential unreliable results due to stack growth") - flag.BoolVar(&benchDoInitBench, "bi", false, "Run Bench Init") - flag.BoolVar(&benchVerify, "bv", false, "Verify Decoded Value during Benchmark") - flag.BoolVar(&benchUnscientificRes, "bu", false, "Show Unscientific Results during Benchmark") -} - -func benchInit() { - benchTs = newTestStruc(benchDepth, true) - approxSize = approxDataSize(reflect.ValueOf(benchTs)) - bytesLen := 1024 * 4 * (benchDepth + 1) * (benchDepth + 1) - if bytesLen < approxSize { - bytesLen = approxSize - } - - benchCheckers = append(benchCheckers, - benchChecker{"msgpack", fnMsgpackEncodeFn, fnMsgpackDecodeFn}, - benchChecker{"binc-nosym", fnBincNoSymEncodeFn, fnBincNoSymDecodeFn}, - benchChecker{"binc-sym", fnBincSymEncodeFn, fnBincSymDecodeFn}, - benchChecker{"simple", fnSimpleEncodeFn, fnSimpleDecodeFn}, - benchChecker{"gob", fnGobEncodeFn, fnGobDecodeFn}, - benchChecker{"json", fnJsonEncodeFn, fnJsonDecodeFn}, - ) - if benchDoInitBench { - runBenchInit() - } -} - -func runBenchInit() { - logT(nil, "..............................................") - logT(nil, "BENCHMARK INIT: %v", time.Now()) - logT(nil, "To run full benchmark comparing encodings (MsgPack, Binc, Simple, JSON, GOB, etc), "+ - "use: \"go test -bench=.\"") - logT(nil, "Benchmark: ") - logT(nil, "\tStruct recursive Depth: %d", benchDepth) - if approxSize > 0 { - logT(nil, "\tApproxDeepSize Of benchmark Struct: %d bytes", approxSize) - } - if benchUnscientificRes { - logT(nil, "Benchmark One-Pass Run (with Unscientific Encode/Decode times): ") - } else { - logT(nil, "Benchmark One-Pass Run:") - } - for _, bc := range benchCheckers { - doBenchCheck(bc.name, bc.encodefn, bc.decodefn) - } - logT(nil, "..............................................") - if benchInitDebug { - logT(nil, "<<<<====>>>> depth: %v, ts: %#v\n", benchDepth, benchTs) - } -} - -func fnBenchNewTs() interface{} { - return new(TestStruc) -} - -func doBenchCheck(name string, encfn benchEncFn, decfn benchDecFn) { - runtime.GC() - tnow := time.Now() - buf, err := encfn(benchTs) - if err != nil { - logT(nil, "\t%10s: **** Error encoding benchTs: %v", name, err) - } - encDur := time.Now().Sub(tnow) - encLen := len(buf) - runtime.GC() - if !benchUnscientificRes { - logT(nil, "\t%10s: len: %d bytes\n", name, encLen) - return - } - tnow = time.Now() - if err = decfn(buf, new(TestStruc)); err != nil { - logT(nil, "\t%10s: **** Error decoding into new TestStruc: %v", name, err) - } - decDur := time.Now().Sub(tnow) - logT(nil, "\t%10s: len: %d bytes, encode: %v, decode: %v\n", name, encLen, encDur, decDur) -} - -func fnBenchmarkEncode(b *testing.B, encName string, ts interface{}, encfn benchEncFn) { - runtime.GC() - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := encfn(ts) - if err 
!= nil { - logT(b, "Error encoding benchTs: %s: %v", encName, err) - b.FailNow() - } - } -} - -func fnBenchmarkDecode(b *testing.B, encName string, ts interface{}, - encfn benchEncFn, decfn benchDecFn, newfn benchIntfFn, -) { - buf, err := encfn(ts) - if err != nil { - logT(b, "Error encoding benchTs: %s: %v", encName, err) - b.FailNow() - } - runtime.GC() - b.ResetTimer() - for i := 0; i < b.N; i++ { - ts = newfn() - if err = decfn(buf, ts); err != nil { - logT(b, "Error decoding into new TestStruc: %s: %v", encName, err) - b.FailNow() - } - if benchVerify { - if vts, vok := ts.(*TestStruc); vok { - verifyTsTree(b, vts) - } - } - } -} - -func verifyTsTree(b *testing.B, ts *TestStruc) { - var ts0, ts1m, ts2m, ts1s, ts2s *TestStruc - ts0 = ts - - if benchDepth > 0 { - ts1m, ts1s = verifyCheckAndGet(b, ts0) - } - - if benchDepth > 1 { - ts2m, ts2s = verifyCheckAndGet(b, ts1m) - } - for _, tsx := range []*TestStruc{ts0, ts1m, ts2m, ts1s, ts2s} { - if tsx != nil { - verifyOneOne(b, tsx) - } - } -} - -func verifyCheckAndGet(b *testing.B, ts0 *TestStruc) (ts1m *TestStruc, ts1s *TestStruc) { - // if len(ts1m.Ms) <= 2 { - // logT(b, "Error: ts1m.Ms len should be > 2. Got: %v", len(ts1m.Ms)) - // b.FailNow() - // } - if len(ts0.Its) == 0 { - logT(b, "Error: ts0.Islice len should be > 0. Got: %v", len(ts0.Its)) - b.FailNow() - } - ts1m = ts0.Mtsptr["0"] - ts1s = ts0.Its[0] - if ts1m == nil || ts1s == nil { - logT(b, "Error: At benchDepth 1, No *TestStruc found") - b.FailNow() - } - return -} - -func verifyOneOne(b *testing.B, ts *TestStruc) { - if ts.I64slice[2] != int64(3) { - logT(b, "Error: Decode failed by checking values") - b.FailNow() - } -} - -func fnMsgpackEncodeFn(ts interface{}) (bs []byte, err error) { - err = NewEncoderBytes(&bs, testMsgpackH).Encode(ts) - return -} - -func fnMsgpackDecodeFn(buf []byte, ts interface{}) error { - return NewDecoderBytes(buf, testMsgpackH).Decode(ts) -} - -func fnBincEncodeFn(ts interface{}, sym AsSymbolFlag) (bs []byte, err error) { - tSym := testBincH.AsSymbols - testBincH.AsSymbols = sym - err = NewEncoderBytes(&bs, testBincH).Encode(ts) - testBincH.AsSymbols = tSym - return -} - -func fnBincDecodeFn(buf []byte, ts interface{}, sym AsSymbolFlag) (err error) { - tSym := testBincH.AsSymbols - testBincH.AsSymbols = sym - err = NewDecoderBytes(buf, testBincH).Decode(ts) - testBincH.AsSymbols = tSym - return -} - -func fnBincNoSymEncodeFn(ts interface{}) (bs []byte, err error) { - return fnBincEncodeFn(ts, AsSymbolNone) -} - -func fnBincNoSymDecodeFn(buf []byte, ts interface{}) error { - return fnBincDecodeFn(buf, ts, AsSymbolNone) -} - -func fnBincSymEncodeFn(ts interface{}) (bs []byte, err error) { - return fnBincEncodeFn(ts, AsSymbolAll) -} - -func fnBincSymDecodeFn(buf []byte, ts interface{}) error { - return fnBincDecodeFn(buf, ts, AsSymbolAll) -} - -func fnSimpleEncodeFn(ts interface{}) (bs []byte, err error) { - err = NewEncoderBytes(&bs, testSimpleH).Encode(ts) - return -} - -func fnSimpleDecodeFn(buf []byte, ts interface{}) error { - return NewDecoderBytes(buf, testSimpleH).Decode(ts) -} - -func fnGobEncodeFn(ts interface{}) ([]byte, error) { - bbuf := new(bytes.Buffer) - err := gob.NewEncoder(bbuf).Encode(ts) - return bbuf.Bytes(), err -} - -func fnGobDecodeFn(buf []byte, ts interface{}) error { - return gob.NewDecoder(bytes.NewBuffer(buf)).Decode(ts) -} - -func fnJsonEncodeFn(ts interface{}) ([]byte, error) { - return json.Marshal(ts) -} - -func fnJsonDecodeFn(buf []byte, ts interface{}) error { - return json.Unmarshal(buf, ts) -} - -func 
Benchmark__Msgpack____Encode(b *testing.B) { - fnBenchmarkEncode(b, "msgpack", benchTs, fnMsgpackEncodeFn) -} - -func Benchmark__Msgpack____Decode(b *testing.B) { - fnBenchmarkDecode(b, "msgpack", benchTs, fnMsgpackEncodeFn, fnMsgpackDecodeFn, fnBenchNewTs) -} - -func Benchmark__Binc_NoSym_Encode(b *testing.B) { - fnBenchmarkEncode(b, "binc", benchTs, fnBincNoSymEncodeFn) -} - -func Benchmark__Binc_NoSym_Decode(b *testing.B) { - fnBenchmarkDecode(b, "binc", benchTs, fnBincNoSymEncodeFn, fnBincNoSymDecodeFn, fnBenchNewTs) -} - -func Benchmark__Binc_Sym___Encode(b *testing.B) { - fnBenchmarkEncode(b, "binc", benchTs, fnBincSymEncodeFn) -} - -func Benchmark__Binc_Sym___Decode(b *testing.B) { - fnBenchmarkDecode(b, "binc", benchTs, fnBincSymEncodeFn, fnBincSymDecodeFn, fnBenchNewTs) -} - -func Benchmark__Simple____Encode(b *testing.B) { - fnBenchmarkEncode(b, "simple", benchTs, fnSimpleEncodeFn) -} - -func Benchmark__Simple____Decode(b *testing.B) { - fnBenchmarkDecode(b, "simple", benchTs, fnSimpleEncodeFn, fnSimpleDecodeFn, fnBenchNewTs) -} - -func Benchmark__Gob________Encode(b *testing.B) { - fnBenchmarkEncode(b, "gob", benchTs, fnGobEncodeFn) -} - -func Benchmark__Gob________Decode(b *testing.B) { - fnBenchmarkDecode(b, "gob", benchTs, fnGobEncodeFn, fnGobDecodeFn, fnBenchNewTs) -} - -func Benchmark__Json_______Encode(b *testing.B) { - fnBenchmarkEncode(b, "json", benchTs, fnJsonEncodeFn) -} - -func Benchmark__Json_______Decode(b *testing.B) { - fnBenchmarkDecode(b, "json", benchTs, fnJsonEncodeFn, fnJsonDecodeFn, fnBenchNewTs) -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/binc.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/binc.go deleted file mode 100644 index 2bb5e8fee..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/binc.go +++ /dev/null @@ -1,786 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -import ( - "math" - // "reflect" - // "sync/atomic" - "time" - //"fmt" -) - -const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning. 
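-// For floats, pruning drops trailing zero bytes of the big-endian
-// IEEE 754 bits (see encodeFloat64 below): float64(2.0) is
-// 0x4000000000000000, so only the leading 0x40 byte is written,
-// preceded by a one-byte length of 1.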
- -//var _ = fmt.Printf - -// vd as low 4 bits (there are 16 slots) -const ( - bincVdSpecial byte = iota - bincVdPosInt - bincVdNegInt - bincVdFloat - - bincVdString - bincVdByteArray - bincVdArray - bincVdMap - - bincVdTimestamp - bincVdSmallInt - bincVdUnicodeOther - bincVdSymbol - - bincVdDecimal - _ // open slot - _ // open slot - bincVdCustomExt = 0x0f -) - -const ( - bincSpNil byte = iota - bincSpFalse - bincSpTrue - bincSpNan - bincSpPosInf - bincSpNegInf - bincSpZeroFloat - bincSpZero - bincSpNegOne -) - -const ( - bincFlBin16 byte = iota - bincFlBin32 - _ // bincFlBin32e - bincFlBin64 - _ // bincFlBin64e - // others not currently supported -) - -type bincEncDriver struct { - w encWriter - m map[string]uint16 // symbols - s uint32 // symbols sequencer - b [8]byte -} - -func (e *bincEncDriver) isBuiltinType(rt uintptr) bool { - return rt == timeTypId -} - -func (e *bincEncDriver) encodeBuiltin(rt uintptr, v interface{}) { - switch rt { - case timeTypId: - bs := encodeTime(v.(time.Time)) - e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) - e.w.writeb(bs) - } -} - -func (e *bincEncDriver) encodeNil() { - e.w.writen1(bincVdSpecial<<4 | bincSpNil) -} - -func (e *bincEncDriver) encodeBool(b bool) { - if b { - e.w.writen1(bincVdSpecial<<4 | bincSpTrue) - } else { - e.w.writen1(bincVdSpecial<<4 | bincSpFalse) - } -} - -func (e *bincEncDriver) encodeFloat32(f float32) { - if f == 0 { - e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) - return - } - e.w.writen1(bincVdFloat<<4 | bincFlBin32) - e.w.writeUint32(math.Float32bits(f)) -} - -func (e *bincEncDriver) encodeFloat64(f float64) { - if f == 0 { - e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) - return - } - bigen.PutUint64(e.b[:], math.Float64bits(f)) - if bincDoPrune { - i := 7 - for ; i >= 0 && (e.b[i] == 0); i-- { - } - i++ - if i <= 6 { - e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64) - e.w.writen1(byte(i)) - e.w.writeb(e.b[:i]) - return - } - } - e.w.writen1(bincVdFloat<<4 | bincFlBin64) - e.w.writeb(e.b[:]) -} - -func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) { - if lim == 4 { - bigen.PutUint32(e.b[:lim], uint32(v)) - } else { - bigen.PutUint64(e.b[:lim], v) - } - if bincDoPrune { - i := pruneSignExt(e.b[:lim], pos) - e.w.writen1(bd | lim - 1 - byte(i)) - e.w.writeb(e.b[i:lim]) - } else { - e.w.writen1(bd | lim - 1) - e.w.writeb(e.b[:lim]) - } -} - -func (e *bincEncDriver) encodeInt(v int64) { - const nbd byte = bincVdNegInt << 4 - switch { - case v >= 0: - e.encUint(bincVdPosInt<<4, true, uint64(v)) - case v == -1: - e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) - default: - e.encUint(bincVdNegInt<<4, false, uint64(-v)) - } -} - -func (e *bincEncDriver) encodeUint(v uint64) { - e.encUint(bincVdPosInt<<4, true, v) -} - -func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { - switch { - case v == 0: - e.w.writen1(bincVdSpecial<<4 | bincSpZero) - case pos && v >= 1 && v <= 16: - e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) - case v <= math.MaxUint8: - e.w.writen2(bd|0x0, byte(v)) - case v <= math.MaxUint16: - e.w.writen1(bd | 0x01) - e.w.writeUint16(uint16(v)) - case v <= math.MaxUint32: - e.encIntegerPrune(bd, pos, v, 4) - default: - e.encIntegerPrune(bd, pos, v, 8) - } -} - -func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { - e.encLen(bincVdCustomExt<<4, uint64(length)) - e.w.writen1(xtag) -} - -func (e *bincEncDriver) encodeArrayPreamble(length int) { - e.encLen(bincVdArray<<4, uint64(length)) -} - -func (e *bincEncDriver) encodeMapPreamble(length int) { - 
e.encLen(bincVdMap<<4, uint64(length)) -} - -func (e *bincEncDriver) encodeString(c charEncoding, v string) { - l := uint64(len(v)) - e.encBytesLen(c, l) - if l > 0 { - e.w.writestr(v) - } -} - -func (e *bincEncDriver) encodeSymbol(v string) { - // if WriteSymbolsNoRefs { - // e.encodeString(c_UTF8, v) - // return - // } - - //symbols only offer benefit when string length > 1. - //This is because strings with length 1 take only 2 bytes to store - //(bd with embedded length, and single byte for string val). - - l := len(v) - switch l { - case 0: - e.encBytesLen(c_UTF8, 0) - return - case 1: - e.encBytesLen(c_UTF8, 1) - e.w.writen1(v[0]) - return - } - if e.m == nil { - e.m = make(map[string]uint16, 16) - } - ui, ok := e.m[v] - if ok { - if ui <= math.MaxUint8 { - e.w.writen2(bincVdSymbol<<4, byte(ui)) - } else { - e.w.writen1(bincVdSymbol<<4 | 0x8) - e.w.writeUint16(ui) - } - } else { - e.s++ - ui = uint16(e.s) - //ui = uint16(atomic.AddUint32(&e.s, 1)) - e.m[v] = ui - var lenprec uint8 - switch { - case l <= math.MaxUint8: - // lenprec = 0 - case l <= math.MaxUint16: - lenprec = 1 - case int64(l) <= math.MaxUint32: - lenprec = 2 - default: - lenprec = 3 - } - if ui <= math.MaxUint8 { - e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui)) - } else { - e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) - e.w.writeUint16(ui) - } - switch lenprec { - case 0: - e.w.writen1(byte(l)) - case 1: - e.w.writeUint16(uint16(l)) - case 2: - e.w.writeUint32(uint32(l)) - default: - e.w.writeUint64(uint64(l)) - } - e.w.writestr(v) - } -} - -func (e *bincEncDriver) encodeStringBytes(c charEncoding, v []byte) { - l := uint64(len(v)) - e.encBytesLen(c, l) - if l > 0 { - e.w.writeb(v) - } -} - -func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { - //TODO: support bincUnicodeOther (for now, just use string or bytearray) - if c == c_RAW { - e.encLen(bincVdByteArray<<4, length) - } else { - e.encLen(bincVdString<<4, length) - } -} - -func (e *bincEncDriver) encLen(bd byte, l uint64) { - if l < 12 { - e.w.writen1(bd | uint8(l+4)) - } else { - e.encLenNumber(bd, l) - } -} - -func (e *bincEncDriver) encLenNumber(bd byte, v uint64) { - switch { - case v <= math.MaxUint8: - e.w.writen2(bd, byte(v)) - case v <= math.MaxUint16: - e.w.writen1(bd | 0x01) - e.w.writeUint16(uint16(v)) - case v <= math.MaxUint32: - e.w.writen1(bd | 0x02) - e.w.writeUint32(uint32(v)) - default: - e.w.writen1(bd | 0x03) - e.w.writeUint64(uint64(v)) - } -} - -//------------------------------------ - -type bincDecDriver struct { - r decReader - bdRead bool - bdType valueType - bd byte - vd byte - vs byte - b [8]byte - m map[uint32]string // symbols (use uint32 as key, as map optimizes for it) -} - -func (d *bincDecDriver) initReadNext() { - if d.bdRead { - return - } - d.bd = d.r.readn1() - d.vd = d.bd >> 4 - d.vs = d.bd & 0x0f - d.bdRead = true - d.bdType = valueTypeUnset -} - -func (d *bincDecDriver) currentEncodedType() valueType { - if d.bdType == valueTypeUnset { - switch d.vd { - case bincVdSpecial: - switch d.vs { - case bincSpNil: - d.bdType = valueTypeNil - case bincSpFalse, bincSpTrue: - d.bdType = valueTypeBool - case bincSpNan, bincSpNegInf, bincSpPosInf, bincSpZeroFloat: - d.bdType = valueTypeFloat - case bincSpZero: - d.bdType = valueTypeUint - case bincSpNegOne: - d.bdType = valueTypeInt - default: - decErr("currentEncodedType: Unrecognized special value 0x%x", d.vs) - } - case bincVdSmallInt: - d.bdType = valueTypeUint - case bincVdPosInt: - d.bdType = valueTypeUint - case bincVdNegInt: - d.bdType = 
valueTypeInt - case bincVdFloat: - d.bdType = valueTypeFloat - case bincVdString: - d.bdType = valueTypeString - case bincVdSymbol: - d.bdType = valueTypeSymbol - case bincVdByteArray: - d.bdType = valueTypeBytes - case bincVdTimestamp: - d.bdType = valueTypeTimestamp - case bincVdCustomExt: - d.bdType = valueTypeExt - case bincVdArray: - d.bdType = valueTypeArray - case bincVdMap: - d.bdType = valueTypeMap - default: - decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.vd) - } - } - return d.bdType -} - -func (d *bincDecDriver) tryDecodeAsNil() bool { - if d.bd == bincVdSpecial<<4|bincSpNil { - d.bdRead = false - return true - } - return false -} - -func (d *bincDecDriver) isBuiltinType(rt uintptr) bool { - return rt == timeTypId -} - -func (d *bincDecDriver) decodeBuiltin(rt uintptr, v interface{}) { - switch rt { - case timeTypId: - if d.vd != bincVdTimestamp { - decErr("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd) - } - tt, err := decodeTime(d.r.readn(int(d.vs))) - if err != nil { - panic(err) - } - var vt *time.Time = v.(*time.Time) - *vt = tt - d.bdRead = false - } -} - -func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { - if vs&0x8 == 0 { - d.r.readb(d.b[0:defaultLen]) - } else { - l := d.r.readn1() - if l > 8 { - decErr("At most 8 bytes used to represent float. Received: %v bytes", l) - } - for i := l; i < 8; i++ { - d.b[i] = 0 - } - d.r.readb(d.b[0:l]) - } -} - -func (d *bincDecDriver) decFloat() (f float64) { - //if true { f = math.Float64frombits(d.r.readUint64()); break; } - switch vs := d.vs; vs & 0x7 { - case bincFlBin32: - d.decFloatPre(vs, 4) - f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4]))) - case bincFlBin64: - d.decFloatPre(vs, 8) - f = math.Float64frombits(bigen.Uint64(d.b[0:8])) - default: - decErr("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs) - } - return -} - -func (d *bincDecDriver) decUint() (v uint64) { - // need to inline the code (interface conversion and type assertion expensive) - switch d.vs { - case 0: - v = uint64(d.r.readn1()) - case 1: - d.r.readb(d.b[6:]) - v = uint64(bigen.Uint16(d.b[6:])) - case 2: - d.b[4] = 0 - d.r.readb(d.b[5:]) - v = uint64(bigen.Uint32(d.b[4:])) - case 3: - d.r.readb(d.b[4:]) - v = uint64(bigen.Uint32(d.b[4:])) - case 4, 5, 6: - lim := int(7 - d.vs) - d.r.readb(d.b[lim:]) - for i := 0; i < lim; i++ { - d.b[i] = 0 - } - v = uint64(bigen.Uint64(d.b[:])) - case 7: - d.r.readb(d.b[:]) - v = uint64(bigen.Uint64(d.b[:])) - default: - decErr("unsigned integers with greater than 64 bits of precision not supported") - } - return -} - -func (d *bincDecDriver) decIntAny() (ui uint64, i int64, neg bool) { - switch d.vd { - case bincVdPosInt: - ui = d.decUint() - i = int64(ui) - case bincVdNegInt: - ui = d.decUint() - i = -(int64(ui)) - neg = true - case bincVdSmallInt: - i = int64(d.vs) + 1 - ui = uint64(d.vs) + 1 - case bincVdSpecial: - switch d.vs { - case bincSpZero: - //i = 0 - case bincSpNegOne: - neg = true - ui = 1 - i = -1 - default: - decErr("numeric decode fails for special value: d.vs: 0x%x", d.vs) - } - default: - decErr("number can only be decoded from uint or int values. 
d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) - } - return -} - -func (d *bincDecDriver) decodeInt(bitsize uint8) (i int64) { - _, i, _ = d.decIntAny() - checkOverflow(0, i, bitsize) - d.bdRead = false - return -} - -func (d *bincDecDriver) decodeUint(bitsize uint8) (ui uint64) { - ui, i, neg := d.decIntAny() - if neg { - decErr("Assigning negative signed value: %v, to unsigned type", i) - } - checkOverflow(ui, 0, bitsize) - d.bdRead = false - return -} - -func (d *bincDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { - switch d.vd { - case bincVdSpecial: - d.bdRead = false - switch d.vs { - case bincSpNan: - return math.NaN() - case bincSpPosInf: - return math.Inf(1) - case bincSpZeroFloat, bincSpZero: - return - case bincSpNegInf: - return math.Inf(-1) - default: - decErr("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs) - } - case bincVdFloat: - f = d.decFloat() - default: - _, i, _ := d.decIntAny() - f = float64(i) - } - checkOverflowFloat32(f, chkOverflow32) - d.bdRead = false - return -} - -// bool can be decoded from bool only (single byte). -func (d *bincDecDriver) decodeBool() (b bool) { - switch d.bd { - case (bincVdSpecial | bincSpFalse): - // b = false - case (bincVdSpecial | bincSpTrue): - b = true - default: - decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - } - d.bdRead = false - return -} - -func (d *bincDecDriver) readMapLen() (length int) { - if d.vd != bincVdMap { - decErr("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd) - } - length = d.decLen() - d.bdRead = false - return -} - -func (d *bincDecDriver) readArrayLen() (length int) { - if d.vd != bincVdArray { - decErr("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd) - } - length = d.decLen() - d.bdRead = false - return -} - -func (d *bincDecDriver) decLen() int { - if d.vs <= 3 { - return int(d.decUint()) - } - return int(d.vs - 4) -} - -func (d *bincDecDriver) decodeString() (s string) { - switch d.vd { - case bincVdString, bincVdByteArray: - if length := d.decLen(); length > 0 { - s = string(d.r.readn(length)) - } - case bincVdSymbol: - //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision, - //extract symbol - //if containsStringVal, read it and put in map - //else look in map for string value - var symbol uint32 - vs := d.vs - //fmt.Printf(">>>> d.vs: 0b%b, & 0x8: %v, & 0x4: %v\n", d.vs, vs & 0x8, vs & 0x4) - if vs&0x8 == 0 { - symbol = uint32(d.r.readn1()) - } else { - symbol = uint32(d.r.readUint16()) - } - if d.m == nil { - d.m = make(map[uint32]string, 16) - } - - if vs&0x4 == 0 { - s = d.m[symbol] - } else { - var slen int - switch vs & 0x3 { - case 0: - slen = int(d.r.readn1()) - case 1: - slen = int(d.r.readUint16()) - case 2: - slen = int(d.r.readUint32()) - case 3: - slen = int(d.r.readUint64()) - } - s = string(d.r.readn(slen)) - d.m[symbol] = s - } - default: - decErr("Invalid d.vd for string. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x", - bincVdString, bincVdByteArray, bincVdSymbol, d.vd) - } - d.bdRead = false - return -} - -func (d *bincDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { - var clen int - switch d.vd { - case bincVdString, bincVdByteArray: - clen = d.decLen() - default: - decErr("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. 
Got: 0x%x", - bincVdString, bincVdByteArray, d.vd) - } - if clen > 0 { - // if no contents in stream, don't update the passed byteslice - if len(bs) != clen { - if len(bs) > clen { - bs = bs[:clen] - } else { - bs = make([]byte, clen) - } - bsOut = bs - changed = true - } - d.r.readb(bs) - } - d.bdRead = false - return -} - -func (d *bincDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - switch d.vd { - case bincVdCustomExt: - l := d.decLen() - xtag = d.r.readn1() - if verifyTag && xtag != tag { - decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) - } - xbs = d.r.readn(l) - case bincVdByteArray: - xbs, _ = d.decodeBytes(nil) - default: - decErr("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd) - } - d.bdRead = false - return -} - -func (d *bincDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { - d.initReadNext() - - switch d.vd { - case bincVdSpecial: - switch d.vs { - case bincSpNil: - vt = valueTypeNil - case bincSpFalse: - vt = valueTypeBool - v = false - case bincSpTrue: - vt = valueTypeBool - v = true - case bincSpNan: - vt = valueTypeFloat - v = math.NaN() - case bincSpPosInf: - vt = valueTypeFloat - v = math.Inf(1) - case bincSpNegInf: - vt = valueTypeFloat - v = math.Inf(-1) - case bincSpZeroFloat: - vt = valueTypeFloat - v = float64(0) - case bincSpZero: - vt = valueTypeUint - v = int64(0) // int8(0) - case bincSpNegOne: - vt = valueTypeInt - v = int64(-1) // int8(-1) - default: - decErr("decodeNaked: Unrecognized special value 0x%x", d.vs) - } - case bincVdSmallInt: - vt = valueTypeUint - v = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 - case bincVdPosInt: - vt = valueTypeUint - v = d.decUint() - case bincVdNegInt: - vt = valueTypeInt - v = -(int64(d.decUint())) - case bincVdFloat: - vt = valueTypeFloat - v = d.decFloat() - case bincVdSymbol: - vt = valueTypeSymbol - v = d.decodeString() - case bincVdString: - vt = valueTypeString - v = d.decodeString() - case bincVdByteArray: - vt = valueTypeBytes - v, _ = d.decodeBytes(nil) - case bincVdTimestamp: - vt = valueTypeTimestamp - tt, err := decodeTime(d.r.readn(int(d.vs))) - if err != nil { - panic(err) - } - v = tt - case bincVdCustomExt: - vt = valueTypeExt - l := d.decLen() - var re RawExt - re.Tag = d.r.readn1() - re.Data = d.r.readn(l) - v = &re - vt = valueTypeExt - case bincVdArray: - vt = valueTypeArray - decodeFurther = true - case bincVdMap: - vt = valueTypeMap - decodeFurther = true - default: - decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.vd) - } - - if !decodeFurther { - d.bdRead = false - } - return -} - -//------------------------------------ - -//BincHandle is a Handle for the Binc Schema-Free Encoding Format -//defined at https://github.com/ugorji/binc . -// -//BincHandle currently supports all Binc features with the following EXCEPTIONS: -// - only integers up to 64 bits of precision are supported. -// big integers are unsupported. -// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types). -// extended precision and decimal IEEE 754 floats are unsupported. -// - Only UTF-8 strings supported. -// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported. -//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon. 
-type BincHandle struct { - BasicHandle -} - -func (h *BincHandle) newEncDriver(w encWriter) encDriver { - return &bincEncDriver{w: w} -} - -func (h *BincHandle) newDecDriver(r decReader) decDriver { - return &bincDecDriver{r: r} -} - -func (_ *BincHandle) writeExt() bool { - return true -} - -func (h *BincHandle) getBasicHandle() *BasicHandle { - return &h.BasicHandle -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/codecs_test.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/codecs_test.go deleted file mode 100644 index cb184491f..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/codecs_test.go +++ /dev/null @@ -1,1002 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -// Test works by using a slice of interfaces. -// It can test for encoding/decoding into/from a nil interface{} -// or passing the object to encode/decode into. -// -// There are basically 2 main tests here. -// First test internally encodes and decodes things and verifies that -// the artifact was as expected. -// Second test will use python msgpack to create a bunch of golden files, -// read those files, and compare them to what it should be. It then -// writes those files back out and compares the byte streams. -// -// Taken together, the tests are pretty extensive. - -import ( - "bytes" - "encoding/gob" - "flag" - "fmt" - "io/ioutil" - "math" - "net" - "net/rpc" - "os" - "os/exec" - "path/filepath" - "reflect" - "runtime" - "strconv" - "sync/atomic" - "testing" - "time" -) - -type testVerifyArg int - -const ( - testVerifyMapTypeSame testVerifyArg = iota - testVerifyMapTypeStrIntf - testVerifyMapTypeIntfIntf - // testVerifySliceIntf - testVerifyForPython -) - -var ( - testInitDebug bool - testUseIoEncDec bool - testStructToArray bool - testWriteNoSymbols bool - - _ = fmt.Printf - skipVerifyVal interface{} = &(struct{}{}) - - // For Go Time, do not use a descriptive timezone. - // It's unnecessary, and makes it harder to do a reflect.DeepEqual. - // The Offset already tells what the offset should be, if not on UTC and unknown zone name. 
- timeLoc = time.FixedZone("", -8*60*60) // UTC-08:00 //time.UTC-8 - timeToCompare1 = time.Date(2012, 2, 2, 2, 2, 2, 2000, timeLoc) - timeToCompare2 = time.Date(1900, 2, 2, 2, 2, 2, 2000, timeLoc) - timeToCompare3 = time.Unix(0, 0).UTC() - timeToCompare4 = time.Time{}.UTC() - - table []interface{} // main items we encode - tableVerify []interface{} // we verify encoded things against this after decode - tableTestNilVerify []interface{} // for nil interface, use this to verify (rules are different) - tablePythonVerify []interface{} // for verifying for python, since Python sometimes - // will encode a float32 as float64, or large int as uint - testRpcInt = new(TestRpcInt) - testMsgpackH = &MsgpackHandle{} - testBincH = &BincHandle{} - testSimpleH = &SimpleHandle{} -) - -func testInitFlags() { - // delete(testDecOpts.ExtFuncs, timeTyp) - flag.BoolVar(&testInitDebug, "tg", false, "Test Debug") - flag.BoolVar(&testUseIoEncDec, "ti", false, "Use IO Reader/Writer for Marshal/Unmarshal") - flag.BoolVar(&testStructToArray, "ts", false, "Set StructToArray option") - flag.BoolVar(&testWriteNoSymbols, "tn", false, "Set NoSymbols option") -} - -type AnonInTestStruc struct { - AS string - AI64 int64 - AI16 int16 - AUi64 uint64 - ASslice []string - AI64slice []int64 -} - -type TestStruc struct { - S string - I64 int64 - I16 int16 - Ui64 uint64 - Ui8 uint8 - B bool - By byte - - Sslice []string - I64slice []int64 - I16slice []int16 - Ui64slice []uint64 - Ui8slice []uint8 - Bslice []bool - Byslice []byte - - Islice []interface{} - Iptrslice []*int64 - - AnonInTestStruc - - //M map[interface{}]interface{} `json:"-",bson:"-"` - Ms map[string]interface{} - Msi64 map[string]int64 - - Nintf interface{} //don't set this, so we can test for nil - T time.Time - Nmap map[string]bool //don't set this, so we can test for nil - Nslice []byte //don't set this, so we can test for nil - Nint64 *int64 //don't set this, so we can test for nil - Mtsptr map[string]*TestStruc - Mts map[string]TestStruc - Its []*TestStruc - Nteststruc *TestStruc -} - -type TestABC struct { - A, B, C string -} - -type TestRpcInt struct { - i int -} - -func (r *TestRpcInt) Update(n int, res *int) error { r.i = n; *res = r.i; return nil } -func (r *TestRpcInt) Square(ignore int, res *int) error { *res = r.i * r.i; return nil } -func (r *TestRpcInt) Mult(n int, res *int) error { *res = r.i * n; return nil } -func (r *TestRpcInt) EchoStruct(arg TestABC, res *string) error { - *res = fmt.Sprintf("%#v", arg) - return nil -} -func (r *TestRpcInt) Echo123(args []string, res *string) error { - *res = fmt.Sprintf("%#v", args) - return nil -} - -func testVerifyVal(v interface{}, arg testVerifyArg) (v2 interface{}) { - //for python msgpack, - // - all positive integers are unsigned 64-bit ints - // - all floats are float64 - switch iv := v.(type) { - case int8: - if iv > 0 { - v2 = uint64(iv) - } else { - v2 = int64(iv) - } - case int16: - if iv > 0 { - v2 = uint64(iv) - } else { - v2 = int64(iv) - } - case int32: - if iv > 0 { - v2 = uint64(iv) - } else { - v2 = int64(iv) - } - case int64: - if iv > 0 { - v2 = uint64(iv) - } else { - v2 = int64(iv) - } - case uint8: - v2 = uint64(iv) - case uint16: - v2 = uint64(iv) - case uint32: - v2 = uint64(iv) - case uint64: - v2 = uint64(iv) - case float32: - v2 = float64(iv) - case float64: - v2 = float64(iv) - case []interface{}: - m2 := make([]interface{}, len(iv)) - for j, vj := range iv { - m2[j] = testVerifyVal(vj, arg) - } - v2 = m2 - case map[string]bool: - switch arg { - case testVerifyMapTypeSame: - m2 := 
make(map[string]bool) - for kj, kv := range iv { - m2[kj] = kv - } - v2 = m2 - case testVerifyMapTypeStrIntf, testVerifyForPython: - m2 := make(map[string]interface{}) - for kj, kv := range iv { - m2[kj] = kv - } - v2 = m2 - case testVerifyMapTypeIntfIntf: - m2 := make(map[interface{}]interface{}) - for kj, kv := range iv { - m2[kj] = kv - } - v2 = m2 - } - case map[string]interface{}: - switch arg { - case testVerifyMapTypeSame: - m2 := make(map[string]interface{}) - for kj, kv := range iv { - m2[kj] = testVerifyVal(kv, arg) - } - v2 = m2 - case testVerifyMapTypeStrIntf, testVerifyForPython: - m2 := make(map[string]interface{}) - for kj, kv := range iv { - m2[kj] = testVerifyVal(kv, arg) - } - v2 = m2 - case testVerifyMapTypeIntfIntf: - m2 := make(map[interface{}]interface{}) - for kj, kv := range iv { - m2[kj] = testVerifyVal(kv, arg) - } - v2 = m2 - } - case map[interface{}]interface{}: - m2 := make(map[interface{}]interface{}) - for kj, kv := range iv { - m2[testVerifyVal(kj, arg)] = testVerifyVal(kv, arg) - } - v2 = m2 - case time.Time: - switch arg { - case testVerifyForPython: - if iv2 := iv.UnixNano(); iv2 > 0 { - v2 = uint64(iv2) - } else { - v2 = int64(iv2) - } - default: - v2 = v - } - default: - v2 = v - } - return -} - -func testInit() { - gob.Register(new(TestStruc)) - if testInitDebug { - ts0 := newTestStruc(2, false) - fmt.Printf("====> depth: %v, ts: %#v\n", 2, ts0) - } - - testBincH.StructToArray = testStructToArray - if testWriteNoSymbols { - testBincH.AsSymbols = AsSymbolNone - } else { - testBincH.AsSymbols = AsSymbolAll - } - testMsgpackH.StructToArray = testStructToArray - testMsgpackH.RawToString = true - // testMsgpackH.AddExt(byteSliceTyp, 0, testMsgpackH.BinaryEncodeExt, testMsgpackH.BinaryDecodeExt) - // testMsgpackH.AddExt(timeTyp, 1, testMsgpackH.TimeEncodeExt, testMsgpackH.TimeDecodeExt) - timeEncExt := func(rv reflect.Value) ([]byte, error) { - return encodeTime(rv.Interface().(time.Time)), nil - } - timeDecExt := func(rv reflect.Value, bs []byte) error { - tt, err := decodeTime(bs) - if err == nil { - rv.Set(reflect.ValueOf(tt)) - } - return err - } - - // add extensions for msgpack, simple for time.Time, so we can encode/decode same way. - testMsgpackH.AddExt(timeTyp, 1, timeEncExt, timeDecExt) - testSimpleH.AddExt(timeTyp, 1, timeEncExt, timeDecExt) - - primitives := []interface{}{ - int8(-8), - int16(-1616), - int32(-32323232), - int64(-6464646464646464), - uint8(192), - uint16(1616), - uint32(32323232), - uint64(6464646464646464), - byte(192), - float32(-3232.0), - float64(-6464646464.0), - float32(3232.0), - float64(6464646464.0), - false, - true, - nil, - "someday", - "", - "bytestring", - timeToCompare1, - timeToCompare2, - timeToCompare3, - timeToCompare4, - } - mapsAndStrucs := []interface{}{ - map[string]bool{ - "true": true, - "false": false, - }, - map[string]interface{}{ - "true": "True", - "false": false, - "uint16(1616)": uint16(1616), - }, - //add a complex combo map in here. (map has list which has map) - //note that after the first thing, everything else should be generic. 
- map[string]interface{}{ - "list": []interface{}{ - int16(1616), - int32(32323232), - true, - float32(-3232.0), - map[string]interface{}{ - "TRUE": true, - "FALSE": false, - }, - []interface{}{true, false}, - }, - "int32": int32(32323232), - "bool": true, - "LONG STRING": "123456789012345678901234567890123456789012345678901234567890", - "SHORT STRING": "1234567890", - }, - map[interface{}]interface{}{ - true: "true", - uint8(138): false, - "false": uint8(200), - }, - newTestStruc(0, false), - } - - table = []interface{}{} - table = append(table, primitives...) //0-19 are primitives - table = append(table, primitives) //20 is a list of primitives - table = append(table, mapsAndStrucs...) //21-24 are maps. 25 is a *struct - - tableVerify = make([]interface{}, len(table)) - tableTestNilVerify = make([]interface{}, len(table)) - tablePythonVerify = make([]interface{}, len(table)) - - lp := len(primitives) - av := tableVerify - for i, v := range table { - if i == lp+3 { - av[i] = skipVerifyVal - continue - } - //av[i] = testVerifyVal(v, testVerifyMapTypeSame) - switch v.(type) { - case []interface{}: - av[i] = testVerifyVal(v, testVerifyMapTypeSame) - case map[string]interface{}: - av[i] = testVerifyVal(v, testVerifyMapTypeSame) - case map[interface{}]interface{}: - av[i] = testVerifyVal(v, testVerifyMapTypeSame) - default: - av[i] = v - } - } - - av = tableTestNilVerify - for i, v := range table { - if i > lp+3 { - av[i] = skipVerifyVal - continue - } - av[i] = testVerifyVal(v, testVerifyMapTypeStrIntf) - } - - av = tablePythonVerify - for i, v := range table { - if i > lp+3 { - av[i] = skipVerifyVal - continue - } - av[i] = testVerifyVal(v, testVerifyForPython) - } - - tablePythonVerify = tablePythonVerify[:24] -} - -func testUnmarshal(v interface{}, data []byte, h Handle) error { - if testUseIoEncDec { - return NewDecoder(bytes.NewBuffer(data), h).Decode(v) - } - return NewDecoderBytes(data, h).Decode(v) -} - -func testMarshal(v interface{}, h Handle) (bs []byte, err error) { - if testUseIoEncDec { - var buf bytes.Buffer - err = NewEncoder(&buf, h).Encode(v) - bs = buf.Bytes() - return - } - err = NewEncoderBytes(&bs, h).Encode(v) - return -} - -func testMarshalErr(v interface{}, h Handle, t *testing.T, name string) (bs []byte, err error) { - if bs, err = testMarshal(v, h); err != nil { - logT(t, "Error encoding %s: %v, Err: %v", name, v, err) - t.FailNow() - } - return -} - -func testUnmarshalErr(v interface{}, data []byte, h Handle, t *testing.T, name string) (err error) { - if err = testUnmarshal(v, data, h); err != nil { - logT(t, "Error Decoding into %s: %v, Err: %v", name, v, err) - t.FailNow() - } - return -} - -func newTestStruc(depth int, bench bool) (ts *TestStruc) { - var i64a, i64b, i64c, i64d int64 = 64, 6464, 646464, 64646464 - - ts = &TestStruc{ - S: "some string", - I64: math.MaxInt64 * 2 / 3, // 64, - I16: 16, - Ui64: uint64(int64(math.MaxInt64 * 2 / 3)), // 64, //don't use MaxUint64, as bson can't write it - Ui8: 160, - B: true, - By: 5, - - Sslice: []string{"one", "two", "three"}, - I64slice: []int64{1, 2, 3}, - I16slice: []int16{4, 5, 6}, - Ui64slice: []uint64{137, 138, 139}, - Ui8slice: []uint8{210, 211, 212}, - Bslice: []bool{true, false, true, false}, - Byslice: []byte{13, 14, 15}, - - Islice: []interface{}{"true", true, "no", false, uint64(288), float64(0.4)}, - - Ms: map[string]interface{}{ - "true": "true", - "int64(9)": false, - }, - Msi64: map[string]int64{ - "one": 1, - "two": 2, - }, - T: timeToCompare1, - AnonInTestStruc: AnonInTestStruc{ - AS: "A-String", - 
AI64: 64, - AI16: 16, - AUi64: 64, - ASslice: []string{"Aone", "Atwo", "Athree"}, - AI64slice: []int64{1, 2, 3}, - }, - } - //For benchmarks, some things will not work. - if !bench { - //json and bson require string keys in maps - //ts.M = map[interface{}]interface{}{ - // true: "true", - // int8(9): false, - //} - //gob cannot encode nil in element in array (encodeArray: nil element) - ts.Iptrslice = []*int64{nil, &i64a, nil, &i64b, nil, &i64c, nil, &i64d, nil} - // ts.Iptrslice = nil - } - if depth > 0 { - depth-- - if ts.Mtsptr == nil { - ts.Mtsptr = make(map[string]*TestStruc) - } - if ts.Mts == nil { - ts.Mts = make(map[string]TestStruc) - } - ts.Mtsptr["0"] = newTestStruc(depth, bench) - ts.Mts["0"] = *(ts.Mtsptr["0"]) - ts.Its = append(ts.Its, ts.Mtsptr["0"]) - } - return -} - -// doTestCodecTableOne allows us test for different variations based on arguments passed. -func doTestCodecTableOne(t *testing.T, testNil bool, h Handle, - vs []interface{}, vsVerify []interface{}) { - //if testNil, then just test for when a pointer to a nil interface{} is passed. It should work. - //Current setup allows us test (at least manually) the nil interface or typed interface. - logT(t, "================ TestNil: %v ================\n", testNil) - for i, v0 := range vs { - logT(t, "..............................................") - logT(t, " Testing: #%d:, %T, %#v\n", i, v0, v0) - b0, err := testMarshalErr(v0, h, t, "v0") - if err != nil { - continue - } - logT(t, " Encoded bytes: len: %v, %v\n", len(b0), b0) - - var v1 interface{} - - if testNil { - err = testUnmarshal(&v1, b0, h) - } else { - if v0 != nil { - v0rt := reflect.TypeOf(v0) // ptr - rv1 := reflect.New(v0rt) - err = testUnmarshal(rv1.Interface(), b0, h) - v1 = rv1.Elem().Interface() - // v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface() - } - } - - logT(t, " v1 returned: %T, %#v", v1, v1) - // if v1 != nil { - // logT(t, " v1 returned: %T, %#v", v1, v1) - // //we always indirect, because ptr to typed value may be passed (if not testNil) - // v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface() - // } - if err != nil { - logT(t, "-------- Error: %v. 
Partial return: %v", err, v1) - failT(t) - continue - } - v0check := vsVerify[i] - if v0check == skipVerifyVal { - logT(t, " Nil Check skipped: Decoded: %T, %#v\n", v1, v1) - continue - } - - if err = deepEqual(v0check, v1); err == nil { - logT(t, "++++++++ Before and After marshal matched\n") - } else { - logT(t, "-------- Before and After marshal do not match: Error: %v"+ - " ====> GOLDEN: (%T) %#v, DECODED: (%T) %#v\n", err, v0check, v0check, v1, v1) - failT(t) - } - } -} - -func testCodecTableOne(t *testing.T, h Handle) { - // func TestMsgpackAllExperimental(t *testing.T) { - // dopts := testDecOpts(nil, nil, false, true, true), - - switch v := h.(type) { - case *MsgpackHandle: - var oldWriteExt, oldRawToString bool - oldWriteExt, v.WriteExt = v.WriteExt, true - oldRawToString, v.RawToString = v.RawToString, true - doTestCodecTableOne(t, false, h, table, tableVerify) - v.WriteExt, v.RawToString = oldWriteExt, oldRawToString - default: - doTestCodecTableOne(t, false, h, table, tableVerify) - } - // func TestMsgpackAll(t *testing.T) { - idxTime, numPrim, numMap := 19, 23, 4 - - //skip []interface{} containing time.Time - doTestCodecTableOne(t, false, h, table[:numPrim], tableVerify[:numPrim]) - doTestCodecTableOne(t, false, h, table[numPrim+1:], tableVerify[numPrim+1:]) - // func TestMsgpackNilStringMap(t *testing.T) { - var oldMapType reflect.Type - v := h.getBasicHandle() - oldMapType, v.MapType = v.MapType, mapStrIntfTyp - - //skip time.Time, []interface{} containing time.Time, last map, and newStruc - doTestCodecTableOne(t, true, h, table[:idxTime], tableTestNilVerify[:idxTime]) - doTestCodecTableOne(t, true, h, table[numPrim+1:numPrim+numMap], tableTestNilVerify[numPrim+1:numPrim+numMap]) - - v.MapType = oldMapType - - // func TestMsgpackNilIntf(t *testing.T) { - - //do newTestStruc and last element of map - doTestCodecTableOne(t, true, h, table[numPrim+numMap:], tableTestNilVerify[numPrim+numMap:]) - //TODO? What is this one? - //doTestCodecTableOne(t, true, h, table[17:18], tableTestNilVerify[17:18]) -} - -func testCodecMiscOne(t *testing.T, h Handle) { - b, err := testMarshalErr(32, h, t, "32") - // Cannot do this nil one, because faster type assertion decoding will panic - // var i *int32 - // if err = testUnmarshal(b, i, nil); err == nil { - // logT(t, "------- Expecting error because we cannot unmarshal to int32 nil ptr") - // t.FailNow() - // } - var i2 int32 = 0 - err = testUnmarshalErr(&i2, b, h, t, "int32-ptr") - if i2 != int32(32) { - logT(t, "------- didn't unmarshal to 32: Received: %d", i2) - t.FailNow() - } - - // func TestMsgpackDecodePtr(t *testing.T) { - ts := newTestStruc(0, false) - b, err = testMarshalErr(ts, h, t, "pointer-to-struct") - if len(b) < 40 { - logT(t, "------- Size must be > 40. Size: %d", len(b)) - t.FailNow() - } - logT(t, "------- b: %v", b) - ts2 := new(TestStruc) - err = testUnmarshalErr(ts2, b, h, t, "pointer-to-struct") - if ts2.I64 != math.MaxInt64*2/3 { - logT(t, "------- Unmarshal wrong. Expect I64 = 64. 
Got: %v", ts2.I64) - t.FailNow() - } - - // func TestMsgpackIntfDecode(t *testing.T) { - m := map[string]int{"A": 2, "B": 3} - p := []interface{}{m} - bs, err := testMarshalErr(p, h, t, "p") - - m2 := map[string]int{} - p2 := []interface{}{m2} - err = testUnmarshalErr(&p2, bs, h, t, "&p2") - - if m2["A"] != 2 || m2["B"] != 3 { - logT(t, "m2 not as expected: expecting: %v, got: %v", m, m2) - t.FailNow() - } - // log("m: %v, m2: %v, p: %v, p2: %v", m, m2, p, p2) - checkEqualT(t, p, p2, "p=p2") - checkEqualT(t, m, m2, "m=m2") - if err = deepEqual(p, p2); err == nil { - logT(t, "p and p2 match") - } else { - logT(t, "Not Equal: %v. p: %v, p2: %v", err, p, p2) - t.FailNow() - } - if err = deepEqual(m, m2); err == nil { - logT(t, "m and m2 match") - } else { - logT(t, "Not Equal: %v. m: %v, m2: %v", err, m, m2) - t.FailNow() - } - - // func TestMsgpackDecodeStructSubset(t *testing.T) { - // test that we can decode a subset of the stream - mm := map[string]interface{}{"A": 5, "B": 99, "C": 333} - bs, err = testMarshalErr(mm, h, t, "mm") - type ttt struct { - A uint8 - C int32 - } - var t2 ttt - testUnmarshalErr(&t2, bs, h, t, "t2") - t3 := ttt{5, 333} - checkEqualT(t, t2, t3, "t2=t3") - - // println(">>>>>") - // test simple arrays, non-addressable arrays, slices - type tarr struct { - A int64 - B [3]int64 - C []byte - D [3]byte - } - var tarr0 = tarr{1, [3]int64{2, 3, 4}, []byte{4, 5, 6}, [3]byte{7, 8, 9}} - // test both pointer and non-pointer (value) - for _, tarr1 := range []interface{}{tarr0, &tarr0} { - bs, err = testMarshalErr(tarr1, h, t, "tarr1") - var tarr2 tarr - testUnmarshalErr(&tarr2, bs, h, t, "tarr2") - checkEqualT(t, tarr0, tarr2, "tarr0=tarr2") - // fmt.Printf(">>>> err: %v. tarr1: %v, tarr2: %v\n", err, tarr0, tarr2) - } - - // test byte array, even if empty (msgpack only) - if h == testMsgpackH { - type ystruct struct { - Anarray []byte - } - var ya = ystruct{} - testUnmarshalErr(&ya, []byte{0x91, 0x90}, h, t, "ya") - } -} - -func testCodecEmbeddedPointer(t *testing.T, h Handle) { - type Z int - type A struct { - AnInt int - } - type B struct { - *Z - *A - MoreInt int - } - var z Z = 4 - x1 := &B{&z, &A{5}, 6} - bs, err := testMarshalErr(x1, h, t, "x1") - // fmt.Printf("buf: len(%v): %x\n", buf.Len(), buf.Bytes()) - var x2 = new(B) - err = testUnmarshalErr(x2, bs, h, t, "x2") - err = checkEqualT(t, x1, x2, "x1=x2") - _ = err -} - -func doTestRpcOne(t *testing.T, rr Rpc, h Handle, doRequest bool, exitSleepMs time.Duration, -) (port int) { - // rpc needs EOF, which is sent via a panic, and so must be recovered. - if !recoverPanicToErr { - logT(t, "EXPECTED. 
set recoverPanicToErr=true, since rpc needs EOF") - t.FailNow() - } - srv := rpc.NewServer() - srv.Register(testRpcInt) - ln, err := net.Listen("tcp", "127.0.0.1:0") - // log("listener: %v", ln.Addr()) - checkErrT(t, err) - port = (ln.Addr().(*net.TCPAddr)).Port - // var opts *DecoderOptions - // opts := testDecOpts - // opts.MapType = mapStrIntfTyp - // opts.RawToString = false - serverExitChan := make(chan bool, 1) - var serverExitFlag uint64 = 0 - serverFn := func() { - for { - conn1, err1 := ln.Accept() - // if err1 != nil { - // //fmt.Printf("accept err1: %v\n", err1) - // continue - // } - if atomic.LoadUint64(&serverExitFlag) == 1 { - serverExitChan <- true - conn1.Close() - return // exit serverFn goroutine - } - if err1 == nil { - var sc rpc.ServerCodec = rr.ServerCodec(conn1, h) - srv.ServeCodec(sc) - } - } - } - - clientFn := func(cc rpc.ClientCodec) { - cl := rpc.NewClientWithCodec(cc) - defer cl.Close() - var up, sq, mult int - var rstr string - // log("Calling client") - checkErrT(t, cl.Call("TestRpcInt.Update", 5, &up)) - // log("Called TestRpcInt.Update") - checkEqualT(t, testRpcInt.i, 5, "testRpcInt.i=5") - checkEqualT(t, up, 5, "up=5") - checkErrT(t, cl.Call("TestRpcInt.Square", 1, &sq)) - checkEqualT(t, sq, 25, "sq=25") - checkErrT(t, cl.Call("TestRpcInt.Mult", 20, &mult)) - checkEqualT(t, mult, 100, "mult=100") - checkErrT(t, cl.Call("TestRpcInt.EchoStruct", TestABC{"Aa", "Bb", "Cc"}, &rstr)) - checkEqualT(t, rstr, fmt.Sprintf("%#v", TestABC{"Aa", "Bb", "Cc"}), "rstr=") - checkErrT(t, cl.Call("TestRpcInt.Echo123", []string{"A1", "B2", "C3"}, &rstr)) - checkEqualT(t, rstr, fmt.Sprintf("%#v", []string{"A1", "B2", "C3"}), "rstr=") - } - - connFn := func() (bs net.Conn) { - // log("calling f1") - bs, err2 := net.Dial(ln.Addr().Network(), ln.Addr().String()) - //fmt.Printf("f1. bs: %v, err2: %v\n", bs, err2) - checkErrT(t, err2) - return - } - - exitFn := func() { - atomic.StoreUint64(&serverExitFlag, 1) - bs := connFn() - <-serverExitChan - bs.Close() - // serverExitChan <- true - } - - go serverFn() - runtime.Gosched() - //time.Sleep(100 * time.Millisecond) - if exitSleepMs == 0 { - defer ln.Close() - defer exitFn() - } - if doRequest { - bs := connFn() - cc := rr.ClientCodec(bs, h) - clientFn(cc) - } - if exitSleepMs != 0 { - go func() { - defer ln.Close() - time.Sleep(exitSleepMs) - exitFn() - }() - } - return -} - -// Comprehensive testing that generates data encoded from python msgpack, -// and validates that our code can read and write it out accordingly. -// We keep this unexported here, and put actual test in ext_dep_test.go. -// This way, it can be excluded by excluding file completely. -func doTestMsgpackPythonGenStreams(t *testing.T) { - logT(t, "TestPythonGenStreams") - tmpdir, err := ioutil.TempDir("", "golang-msgpack-test") - if err != nil { - logT(t, "-------- Unable to create temp directory\n") - t.FailNow() - } - defer os.RemoveAll(tmpdir) - logT(t, "tmpdir: %v", tmpdir) - cmd := exec.Command("python", "msgpack_test.py", "testdata", tmpdir) - //cmd.Stdin = strings.NewReader("some input") - //cmd.Stdout = &out - var cmdout []byte - if cmdout, err = cmd.CombinedOutput(); err != nil { - logT(t, "-------- Error running msgpack_test.py testdata. 
Err: %v", err) - logT(t, " %v", string(cmdout)) - t.FailNow() - } - - oldMapType := testMsgpackH.MapType - for i, v := range tablePythonVerify { - testMsgpackH.MapType = oldMapType - //load up the golden file based on number - //decode it - //compare to in-mem object - //encode it again - //compare to output stream - logT(t, "..............................................") - logT(t, " Testing: #%d: %T, %#v\n", i, v, v) - var bss []byte - bss, err = ioutil.ReadFile(filepath.Join(tmpdir, strconv.Itoa(i)+".golden")) - if err != nil { - logT(t, "-------- Error reading golden file: %d. Err: %v", i, err) - failT(t) - continue - } - testMsgpackH.MapType = mapStrIntfTyp - - var v1 interface{} - if err = testUnmarshal(&v1, bss, testMsgpackH); err != nil { - logT(t, "-------- Error decoding stream: %d: Err: %v", i, err) - failT(t) - continue - } - if v == skipVerifyVal { - continue - } - //no need to indirect, because we pass a nil ptr, so we already have the value - //if v1 != nil { v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface() } - if err = deepEqual(v, v1); err == nil { - logT(t, "++++++++ Objects match") - } else { - logT(t, "-------- Objects do not match: %v. Source: %T. Decoded: %T", err, v, v1) - logT(t, "-------- AGAINST: %#v", v) - logT(t, "-------- DECODED: %#v <====> %#v", v1, reflect.Indirect(reflect.ValueOf(v1)).Interface()) - failT(t) - } - bsb, err := testMarshal(v1, testMsgpackH) - if err != nil { - logT(t, "Error encoding to stream: %d: Err: %v", i, err) - failT(t) - continue - } - if err = deepEqual(bsb, bss); err == nil { - logT(t, "++++++++ Bytes match") - } else { - logT(t, "???????? Bytes do not match. %v.", err) - xs := "--------" - if reflect.ValueOf(v).Kind() == reflect.Map { - xs = " " - logT(t, "%s It's a map. Ok that they don't match (dependent on ordering).", xs) - } else { - logT(t, "%s It's not a map. 
They should match.", xs) - failT(t) - } - logT(t, "%s FROM_FILE: %4d] %v", xs, len(bss), bss) - logT(t, "%s ENCODED: %4d] %v", xs, len(bsb), bsb) - } - } - testMsgpackH.MapType = oldMapType -} - -// To test MsgpackSpecRpc, we test 3 scenarios: -// - Go Client to Go RPC Service (contained within TestMsgpackRpcSpec) -// - Go client to Python RPC Service (contained within doTestMsgpackRpcSpecGoClientToPythonSvc) -// - Python Client to Go RPC Service (contained within doTestMsgpackRpcSpecPythonClientToGoSvc) -// -// This allows us test the different calling conventions -// - Go Service requires only one argument -// - Python Service allows multiple arguments - -func doTestMsgpackRpcSpecGoClientToPythonSvc(t *testing.T) { - openPort := "6789" - cmd := exec.Command("python", "msgpack_test.py", "rpc-server", openPort, "2") - checkErrT(t, cmd.Start()) - time.Sleep(100 * time.Millisecond) // time for python rpc server to start - bs, err2 := net.Dial("tcp", ":"+openPort) - checkErrT(t, err2) - cc := MsgpackSpecRpc.ClientCodec(bs, testMsgpackH) - cl := rpc.NewClientWithCodec(cc) - defer cl.Close() - var rstr string - checkErrT(t, cl.Call("EchoStruct", TestABC{"Aa", "Bb", "Cc"}, &rstr)) - //checkEqualT(t, rstr, "{'A': 'Aa', 'B': 'Bb', 'C': 'Cc'}") - var mArgs MsgpackSpecRpcMultiArgs = []interface{}{"A1", "B2", "C3"} - checkErrT(t, cl.Call("Echo123", mArgs, &rstr)) - checkEqualT(t, rstr, "1:A1 2:B2 3:C3", "rstr=") -} - -func doTestMsgpackRpcSpecPythonClientToGoSvc(t *testing.T) { - port := doTestRpcOne(t, MsgpackSpecRpc, testMsgpackH, false, 1*time.Second) - //time.Sleep(1000 * time.Millisecond) - cmd := exec.Command("python", "msgpack_test.py", "rpc-client-go-service", strconv.Itoa(port)) - var cmdout []byte - var err error - if cmdout, err = cmd.CombinedOutput(); err != nil { - logT(t, "-------- Error running msgpack_test.py rpc-client-go-service. 
Err: %v", err) - logT(t, " %v", string(cmdout)) - t.FailNow() - } - checkEqualT(t, string(cmdout), - fmt.Sprintf("%#v\n%#v\n", []string{"A1", "B2", "C3"}, TestABC{"Aa", "Bb", "Cc"}), "cmdout=") -} - -func TestBincCodecsTable(t *testing.T) { - testCodecTableOne(t, testBincH) -} - -func TestBincCodecsMisc(t *testing.T) { - testCodecMiscOne(t, testBincH) -} - -func TestBincCodecsEmbeddedPointer(t *testing.T) { - testCodecEmbeddedPointer(t, testBincH) -} - -func TestSimpleCodecsTable(t *testing.T) { - testCodecTableOne(t, testSimpleH) -} - -func TestSimpleCodecsMisc(t *testing.T) { - testCodecMiscOne(t, testSimpleH) -} - -func TestSimpleCodecsEmbeddedPointer(t *testing.T) { - testCodecEmbeddedPointer(t, testSimpleH) -} - -func TestMsgpackCodecsTable(t *testing.T) { - testCodecTableOne(t, testMsgpackH) -} - -func TestMsgpackCodecsMisc(t *testing.T) { - testCodecMiscOne(t, testMsgpackH) -} - -func TestMsgpackCodecsEmbeddedPointer(t *testing.T) { - testCodecEmbeddedPointer(t, testMsgpackH) -} - -func TestBincRpcGo(t *testing.T) { - doTestRpcOne(t, GoRpc, testBincH, true, 0) -} - -func _TestSimpleRpcGo(t *testing.T) { - doTestRpcOne(t, GoRpc, testSimpleH, true, 0) -} - -func TestMsgpackRpcGo(t *testing.T) { - doTestRpcOne(t, GoRpc, testMsgpackH, true, 0) -} - -func TestMsgpackRpcSpec(t *testing.T) { - doTestRpcOne(t, MsgpackSpecRpc, testMsgpackH, true, 0) -} - -// TODO: -// Add Tests for: -// - decoding empty list/map in stream into a nil slice/map -// - binary(M|Unm)arsher support for time.Time diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/decode.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/decode.go deleted file mode 100644 index 87bef2b93..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/decode.go +++ /dev/null @@ -1,1048 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -import ( - "io" - "reflect" - // "runtime/debug" -) - -// Some tagging information for error messages. -const ( - msgTagDec = "codec.decoder" - msgBadDesc = "Unrecognized descriptor byte" - msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" -) - -// decReader abstracts the reading source, allowing implementations that can -// read from an io.Reader or directly off a byte slice with zero-copying. -type decReader interface { - readn(n int) []byte - readb([]byte) - readn1() uint8 - readUint16() uint16 - readUint32() uint32 - readUint64() uint64 -} - -type decDriver interface { - initReadNext() - tryDecodeAsNil() bool - currentEncodedType() valueType - isBuiltinType(rt uintptr) bool - decodeBuiltin(rt uintptr, v interface{}) - //decodeNaked: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). - decodeNaked() (v interface{}, vt valueType, decodeFurther bool) - decodeInt(bitsize uint8) (i int64) - decodeUint(bitsize uint8) (ui uint64) - decodeFloat(chkOverflow32 bool) (f float64) - decodeBool() (b bool) - // decodeString can also decode symbols - decodeString() (s string) - decodeBytes(bs []byte) (bsOut []byte, changed bool) - decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) - readMapLen() int - readArrayLen() int -} - -type DecodeOptions struct { - // An instance of MapType is used during schema-less decoding of a map in the stream. 
- // If nil, we use map[interface{}]interface{}
- MapType reflect.Type
- // An instance of SliceType is used during schema-less decoding of an array in the stream.
- // If nil, we use []interface{}
- SliceType reflect.Type
- // ErrorIfNoField controls whether an error is returned when decoding a map
- // from a codec stream into a struct, and no matching struct field is found.
- ErrorIfNoField bool
-}
-
-// ------------------------------------
-
-// ioDecReader is a decReader that reads off an io.Reader
-type ioDecReader struct {
- r io.Reader
- br io.ByteReader
- x [8]byte //temp byte array re-used internally for efficiency
-}
-
-func (z *ioDecReader) readn(n int) (bs []byte) {
- if n <= 0 {
- return
- }
- bs = make([]byte, n)
- if _, err := io.ReadAtLeast(z.r, bs, n); err != nil {
- panic(err)
- }
- return
-}
-
-func (z *ioDecReader) readb(bs []byte) {
- if _, err := io.ReadAtLeast(z.r, bs, len(bs)); err != nil {
- panic(err)
- }
-}
-
-func (z *ioDecReader) readn1() uint8 {
- if z.br != nil {
- b, err := z.br.ReadByte()
- if err != nil {
- panic(err)
- }
- return b
- }
- z.readb(z.x[:1])
- return z.x[0]
-}
-
-func (z *ioDecReader) readUint16() uint16 {
- z.readb(z.x[:2])
- return bigen.Uint16(z.x[:2])
-}
-
-func (z *ioDecReader) readUint32() uint32 {
- z.readb(z.x[:4])
- return bigen.Uint32(z.x[:4])
-}
-
-func (z *ioDecReader) readUint64() uint64 {
- z.readb(z.x[:8])
- return bigen.Uint64(z.x[:8])
-}
-
-// ------------------------------------
-
-// bytesDecReader is a decReader that reads off a byte slice with zero copying
-type bytesDecReader struct {
- b []byte // data
- c int // cursor
- a int // available
-}
-
-func (z *bytesDecReader) consume(n int) (oldcursor int) {
- if z.a == 0 {
- panic(io.EOF)
- }
- if n > z.a {
- decErr("Trying to read %v bytes. Only %v available", n, z.a)
- }
- // z.checkAvailable(n)
- oldcursor = z.c
- z.c = oldcursor + n
- z.a = z.a - n
- return
-}
-
-func (z *bytesDecReader) readn(n int) (bs []byte) {
- if n <= 0 {
- return
- }
- c0 := z.consume(n)
- bs = z.b[c0:z.c]
- return
-}
-
-func (z *bytesDecReader) readb(bs []byte) {
- copy(bs, z.readn(len(bs)))
-}
-
-func (z *bytesDecReader) readn1() uint8 {
- c0 := z.consume(1)
- return z.b[c0]
-}
-
-// Use the binaryEncoding helper for the 4-byte and 8-byte reads, but inline the 2-byte read:
-// creating a temp slice variable and passing it to the helper function is too expensive
-// for just 2 bytes.
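-// (Illustrative note: the inlined expression in readUint16 below is
-// equivalent to bigen.Uint16(z.b[c0:z.c]), as used for the larger reads.)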
- -func (z *bytesDecReader) readUint16() uint16 { - c0 := z.consume(2) - return uint16(z.b[c0+1]) | uint16(z.b[c0])<<8 -} - -func (z *bytesDecReader) readUint32() uint32 { - c0 := z.consume(4) - return bigen.Uint32(z.b[c0:z.c]) -} - -func (z *bytesDecReader) readUint64() uint64 { - c0 := z.consume(8) - return bigen.Uint64(z.b[c0:z.c]) -} - -// ------------------------------------ - -// decFnInfo has methods for registering handling decoding of a specific type -// based on some characteristics (builtin, extension, reflect Kind, etc) -type decFnInfo struct { - ti *typeInfo - d *Decoder - dd decDriver - xfFn func(reflect.Value, []byte) error - xfTag byte - array bool -} - -func (f *decFnInfo) builtin(rv reflect.Value) { - f.dd.decodeBuiltin(f.ti.rtid, rv.Addr().Interface()) -} - -func (f *decFnInfo) rawExt(rv reflect.Value) { - xtag, xbs := f.dd.decodeExt(false, 0) - rv.Field(0).SetUint(uint64(xtag)) - rv.Field(1).SetBytes(xbs) -} - -func (f *decFnInfo) ext(rv reflect.Value) { - _, xbs := f.dd.decodeExt(true, f.xfTag) - if fnerr := f.xfFn(rv, xbs); fnerr != nil { - panic(fnerr) - } -} - -func (f *decFnInfo) binaryMarshal(rv reflect.Value) { - var bm binaryUnmarshaler - if f.ti.unmIndir == -1 { - bm = rv.Addr().Interface().(binaryUnmarshaler) - } else if f.ti.unmIndir == 0 { - bm = rv.Interface().(binaryUnmarshaler) - } else { - for j, k := int8(0), f.ti.unmIndir; j < k; j++ { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - rv = rv.Elem() - } - bm = rv.Interface().(binaryUnmarshaler) - } - xbs, _ := f.dd.decodeBytes(nil) - if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { - panic(fnerr) - } -} - -func (f *decFnInfo) kErr(rv reflect.Value) { - decErr("Unhandled value for kind: %v: %s", rv.Kind(), msgBadDesc) -} - -func (f *decFnInfo) kString(rv reflect.Value) { - rv.SetString(f.dd.decodeString()) -} - -func (f *decFnInfo) kBool(rv reflect.Value) { - rv.SetBool(f.dd.decodeBool()) -} - -func (f *decFnInfo) kInt(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(intBitsize)) -} - -func (f *decFnInfo) kInt64(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(64)) -} - -func (f *decFnInfo) kInt32(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(32)) -} - -func (f *decFnInfo) kInt8(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(8)) -} - -func (f *decFnInfo) kInt16(rv reflect.Value) { - rv.SetInt(f.dd.decodeInt(16)) -} - -func (f *decFnInfo) kFloat32(rv reflect.Value) { - rv.SetFloat(f.dd.decodeFloat(true)) -} - -func (f *decFnInfo) kFloat64(rv reflect.Value) { - rv.SetFloat(f.dd.decodeFloat(false)) -} - -func (f *decFnInfo) kUint8(rv reflect.Value) { - rv.SetUint(f.dd.decodeUint(8)) -} - -func (f *decFnInfo) kUint64(rv reflect.Value) { - rv.SetUint(f.dd.decodeUint(64)) -} - -func (f *decFnInfo) kUint(rv reflect.Value) { - rv.SetUint(f.dd.decodeUint(uintBitsize)) -} - -func (f *decFnInfo) kUint32(rv reflect.Value) { - rv.SetUint(f.dd.decodeUint(32)) -} - -func (f *decFnInfo) kUint16(rv reflect.Value) { - rv.SetUint(f.dd.decodeUint(16)) -} - -// func (f *decFnInfo) kPtr(rv reflect.Value) { -// debugf(">>>>>>> ??? 
decode kPtr called - shouldn't get called") -// if rv.IsNil() { -// rv.Set(reflect.New(rv.Type().Elem())) -// } -// f.d.decodeValue(rv.Elem()) -// } - -func (f *decFnInfo) kInterface(rv reflect.Value) { - // debugf("\t===> kInterface") - if !rv.IsNil() { - f.d.decodeValue(rv.Elem()) - return - } - // nil interface: - // use some hieristics to set the nil interface to an - // appropriate value based on the first byte read (byte descriptor bd) - v, vt, decodeFurther := f.dd.decodeNaked() - if vt == valueTypeNil { - return - } - // Cannot decode into nil interface with methods (e.g. error, io.Reader, etc) - // if non-nil value in stream. - if num := f.ti.rt.NumMethod(); num > 0 { - decErr("decodeValue: Cannot decode non-nil codec value into nil %v (%v methods)", - f.ti.rt, num) - } - var rvn reflect.Value - var useRvn bool - switch vt { - case valueTypeMap: - if f.d.h.MapType == nil { - var m2 map[interface{}]interface{} - v = &m2 - } else { - rvn = reflect.New(f.d.h.MapType).Elem() - useRvn = true - } - case valueTypeArray: - if f.d.h.SliceType == nil { - var m2 []interface{} - v = &m2 - } else { - rvn = reflect.New(f.d.h.SliceType).Elem() - useRvn = true - } - case valueTypeExt: - re := v.(*RawExt) - var bfn func(reflect.Value, []byte) error - rvn, bfn = f.d.h.getDecodeExtForTag(re.Tag) - if bfn == nil { - rvn = reflect.ValueOf(*re) - } else if fnerr := bfn(rvn, re.Data); fnerr != nil { - panic(fnerr) - } - rv.Set(rvn) - return - } - if decodeFurther { - if useRvn { - f.d.decodeValue(rvn) - } else if v != nil { - // this v is a pointer, so we need to dereference it when done - f.d.decode(v) - rvn = reflect.ValueOf(v).Elem() - useRvn = true - } - } - if useRvn { - rv.Set(rvn) - } else if v != nil { - rv.Set(reflect.ValueOf(v)) - } -} - -func (f *decFnInfo) kStruct(rv reflect.Value) { - fti := f.ti - if currEncodedType := f.dd.currentEncodedType(); currEncodedType == valueTypeMap { - containerLen := f.dd.readMapLen() - if containerLen == 0 { - return - } - tisfi := fti.sfi - for j := 0; j < containerLen; j++ { - // var rvkencname string - // ddecode(&rvkencname) - f.dd.initReadNext() - rvkencname := f.dd.decodeString() - // rvksi := ti.getForEncName(rvkencname) - if k := fti.indexForEncName(rvkencname); k > -1 { - sfik := tisfi[k] - if sfik.i != -1 { - f.d.decodeValue(rv.Field(int(sfik.i))) - } else { - f.d.decEmbeddedField(rv, sfik.is) - } - // f.d.decodeValue(ti.field(k, rv)) - } else { - if f.d.h.ErrorIfNoField { - decErr("No matching struct field found when decoding stream map with key: %v", - rvkencname) - } else { - var nilintf0 interface{} - f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem()) - } - } - } - } else if currEncodedType == valueTypeArray { - containerLen := f.dd.readArrayLen() - if containerLen == 0 { - return - } - for j, si := range fti.sfip { - if j == containerLen { - break - } - if si.i != -1 { - f.d.decodeValue(rv.Field(int(si.i))) - } else { - f.d.decEmbeddedField(rv, si.is) - } - } - if containerLen > len(fti.sfip) { - // read remaining values and throw away - for j := len(fti.sfip); j < containerLen; j++ { - var nilintf0 interface{} - f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem()) - } - } - } else { - decErr("Only encoded map or array can be decoded into a struct. (valueType: %x)", - currEncodedType) - } -} - -func (f *decFnInfo) kSlice(rv reflect.Value) { - // A slice can be set from a map or array in stream. 
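- // (Note: when the stream holds a map, decContLens below reports double the
- // length, so the slice receives the key/value pairs flattened into a sequence.)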
- currEncodedType := f.dd.currentEncodedType() - - switch currEncodedType { - case valueTypeBytes, valueTypeString: - if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 { - if bs2, changed2 := f.dd.decodeBytes(rv.Bytes()); changed2 { - rv.SetBytes(bs2) - } - return - } - } - - if shortCircuitReflectToFastPath && rv.CanAddr() { - switch f.ti.rtid { - case intfSliceTypId: - f.d.decSliceIntf(rv.Addr().Interface().(*[]interface{}), currEncodedType, f.array) - return - case uint64SliceTypId: - f.d.decSliceUint64(rv.Addr().Interface().(*[]uint64), currEncodedType, f.array) - return - case int64SliceTypId: - f.d.decSliceInt64(rv.Addr().Interface().(*[]int64), currEncodedType, f.array) - return - case strSliceTypId: - f.d.decSliceStr(rv.Addr().Interface().(*[]string), currEncodedType, f.array) - return - } - } - - containerLen, containerLenS := decContLens(f.dd, currEncodedType) - - // an array can never return a nil slice. so no need to check f.array here. - - if rv.IsNil() { - rv.Set(reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS)) - } - - if containerLen == 0 { - return - } - - if rvcap, rvlen := rv.Len(), rv.Cap(); containerLenS > rvcap { - if f.array { // !rv.CanSet() - decErr(msgDecCannotExpandArr, rvcap, containerLenS) - } - rvn := reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS) - if rvlen > 0 { - reflect.Copy(rvn, rv) - } - rv.Set(rvn) - } else if containerLenS > rvlen { - rv.SetLen(containerLenS) - } - - for j := 0; j < containerLenS; j++ { - f.d.decodeValue(rv.Index(j)) - } -} - -func (f *decFnInfo) kArray(rv reflect.Value) { - // f.d.decodeValue(rv.Slice(0, rv.Len())) - f.kSlice(rv.Slice(0, rv.Len())) -} - -func (f *decFnInfo) kMap(rv reflect.Value) { - if shortCircuitReflectToFastPath && rv.CanAddr() { - switch f.ti.rtid { - case mapStrIntfTypId: - f.d.decMapStrIntf(rv.Addr().Interface().(*map[string]interface{})) - return - case mapIntfIntfTypId: - f.d.decMapIntfIntf(rv.Addr().Interface().(*map[interface{}]interface{})) - return - case mapInt64IntfTypId: - f.d.decMapInt64Intf(rv.Addr().Interface().(*map[int64]interface{})) - return - case mapUint64IntfTypId: - f.d.decMapUint64Intf(rv.Addr().Interface().(*map[uint64]interface{})) - return - } - } - - containerLen := f.dd.readMapLen() - - if rv.IsNil() { - rv.Set(reflect.MakeMap(f.ti.rt)) - } - - if containerLen == 0 { - return - } - - ktype, vtype := f.ti.rt.Key(), f.ti.rt.Elem() - ktypeId := reflect.ValueOf(ktype).Pointer() - for j := 0; j < containerLen; j++ { - rvk := reflect.New(ktype).Elem() - f.d.decodeValue(rvk) - - // special case if a byte array. - // if ktype == intfTyp { - if ktypeId == intfTypId { - rvk = rvk.Elem() - if rvk.Type() == uint8SliceTyp { - rvk = reflect.ValueOf(string(rvk.Bytes())) - } - } - rvv := rv.MapIndex(rvk) - if !rvv.IsValid() { - rvv = reflect.New(vtype).Elem() - } - - f.d.decodeValue(rvv) - rv.SetMapIndex(rvk, rvv) - } -} - -// ---------------------------------------- - -type decFn struct { - i *decFnInfo - f func(*decFnInfo, reflect.Value) -} - -// A Decoder reads and decodes an object from an input stream in the codec format. -type Decoder struct { - r decReader - d decDriver - h *BasicHandle - f map[uintptr]decFn - x []uintptr - s []decFn -} - -// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader. -// -// For efficiency, Users are encouraged to pass in a memory buffered writer -// (eg bufio.Reader, bytes.Buffer). 
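-//
-// For example (illustrative; handle is any configured Handle):
-// dec := NewDecoder(bufio.NewReader(r), handle)
-// var v interface{}
-// err := dec.Decode(&v)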
-func NewDecoder(r io.Reader, h Handle) *Decoder { - z := ioDecReader{ - r: r, - } - z.br, _ = r.(io.ByteReader) - return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()} -} - -// NewDecoderBytes returns a Decoder which efficiently decodes directly -// from a byte slice with zero copying. -func NewDecoderBytes(in []byte, h Handle) *Decoder { - z := bytesDecReader{ - b: in, - a: len(in), - } - return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()} -} - -// Decode decodes the stream from reader and stores the result in the -// value pointed to by v. v cannot be a nil pointer. v can also be -// a reflect.Value of a pointer. -// -// Note that a pointer to a nil interface is not a nil pointer. -// If you do not know what type of stream it is, pass in a pointer to a nil interface. -// We will decode and store a value in that nil interface. -// -// Sample usages: -// // Decoding into a non-nil typed value -// var f float32 -// err = codec.NewDecoder(r, handle).Decode(&f) -// -// // Decoding into nil interface -// var v interface{} -// dec := codec.NewDecoder(r, handle) -// err = dec.Decode(&v) -// -// When decoding into a nil interface{}, we will decode into an appropriate value based -// on the contents of the stream: -// - Numbers are decoded as float64, int64 or uint64. -// - Other values are decoded appropriately depending on the type: -// bool, string, []byte, time.Time, etc -// - Extensions are decoded as RawExt (if no ext function registered for the tag) -// Configurations exist on the Handle to override defaults -// (e.g. for MapType, SliceType and how to decode raw bytes). -// -// When decoding into a non-nil interface{} value, the mode of encoding is based on the -// type of the value. When a value is seen: -// - If an extension is registered for it, call that extension function -// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error -// - Else decode it based on its reflect.Kind -// -// There are some special rules when decoding into containers (slice/array/map/struct). -// Decode will typically use the stream contents to UPDATE the container. -// - A map can be decoded from a stream map, by updating matching keys. -// - A slice can be decoded from a stream array, -// by updating the first n elements, where n is length of the stream. -// - A slice can be decoded from a stream map, by decoding as if -// it contains a sequence of key-value pairs. -// - A struct can be decoded from a stream map, by updating matching fields. -// - A struct can be decoded from a stream array, -// by updating fields as they occur in the struct (by index). -// -// When decoding a stream map or array with length of 0 into a nil map or slice, -// we reset the destination map or slice to a zero-length value. -// -// However, when decoding a stream nil, we reset the destination container -// to its "zero" value (e.g. nil for slice/map, etc). 
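-//
-// For example (an illustrative sketch of the update semantics above):
-// m := map[string]int{"A": 1, "B": 2}
-// // decoding a stream map {"B": 9} updates only the matching key,
-// // leaving m == map[string]int{"A": 1, "B": 9}
-// err = NewDecoderBytes(data, handle).Decode(&m)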
-// -func (d *Decoder) Decode(v interface{}) (err error) { - defer panicToErr(&err) - d.decode(v) - return -} - -func (d *Decoder) decode(iv interface{}) { - d.d.initReadNext() - - switch v := iv.(type) { - case nil: - decErr("Cannot decode into nil.") - - case reflect.Value: - d.chkPtrValue(v) - d.decodeValue(v.Elem()) - - case *string: - *v = d.d.decodeString() - case *bool: - *v = d.d.decodeBool() - case *int: - *v = int(d.d.decodeInt(intBitsize)) - case *int8: - *v = int8(d.d.decodeInt(8)) - case *int16: - *v = int16(d.d.decodeInt(16)) - case *int32: - *v = int32(d.d.decodeInt(32)) - case *int64: - *v = d.d.decodeInt(64) - case *uint: - *v = uint(d.d.decodeUint(uintBitsize)) - case *uint8: - *v = uint8(d.d.decodeUint(8)) - case *uint16: - *v = uint16(d.d.decodeUint(16)) - case *uint32: - *v = uint32(d.d.decodeUint(32)) - case *uint64: - *v = d.d.decodeUint(64) - case *float32: - *v = float32(d.d.decodeFloat(true)) - case *float64: - *v = d.d.decodeFloat(false) - case *[]byte: - *v, _ = d.d.decodeBytes(*v) - - case *[]interface{}: - d.decSliceIntf(v, valueTypeInvalid, false) - case *[]uint64: - d.decSliceUint64(v, valueTypeInvalid, false) - case *[]int64: - d.decSliceInt64(v, valueTypeInvalid, false) - case *[]string: - d.decSliceStr(v, valueTypeInvalid, false) - case *map[string]interface{}: - d.decMapStrIntf(v) - case *map[interface{}]interface{}: - d.decMapIntfIntf(v) - case *map[uint64]interface{}: - d.decMapUint64Intf(v) - case *map[int64]interface{}: - d.decMapInt64Intf(v) - - case *interface{}: - d.decodeValue(reflect.ValueOf(iv).Elem()) - - default: - rv := reflect.ValueOf(iv) - d.chkPtrValue(rv) - d.decodeValue(rv.Elem()) - } -} - -func (d *Decoder) decodeValue(rv reflect.Value) { - d.d.initReadNext() - - if d.d.tryDecodeAsNil() { - // If value in stream is nil, set the dereferenced value to its "zero" value (if settable) - if rv.Kind() == reflect.Ptr { - if !rv.IsNil() { - rv.Set(reflect.Zero(rv.Type())) - } - return - } - // for rv.Kind() == reflect.Ptr { - // rv = rv.Elem() - // } - if rv.IsValid() { // rv.CanSet() // always settable, except it's invalid - rv.Set(reflect.Zero(rv.Type())) - } - return - } - - // If stream is not containing a nil value, then we can deref to the base - // non-pointer value, and decode into that. - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - rv = rv.Elem() - } - - rt := rv.Type() - rtid := reflect.ValueOf(rt).Pointer() - - // retrieve or register a focus'ed function for this type - // to eliminate need to do the retrieval multiple times - - // if d.f == nil && d.s == nil { debugf("---->Creating new dec f map for type: %v\n", rt) } - var fn decFn - var ok bool - if useMapForCodecCache { - fn, ok = d.f[rtid] - } else { - for i, v := range d.x { - if v == rtid { - fn, ok = d.s[i], true - break - } - } - } - if !ok { - // debugf("\tCreating new dec fn for type: %v\n", rt) - fi := decFnInfo{ti: getTypeInfo(rtid, rt), d: d, dd: d.d} - fn.i = &fi - // An extension can be registered for any type, regardless of the Kind - // (e.g. type BitSet int64, type MyStruct { / * unexported fields * / }, type X []int, etc. - // - // We can't check if it's an extension byte here first, because the user may have - // registered a pointer or non-pointer type, meaning we may have to recurse first - // before matching a mapped type, even though the extension byte is already detected. - // - // NOTE: if decoding into a nil interface{}, we return a non-nil - // value except even if the container registers a length of 0. 
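- // (Resolution order below: RawExt, builtin type, registered extension,
- // BinaryUnmarshaler, and finally the reflect.Kind of the type.)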
- if rtid == rawExtTypId { - fn.f = (*decFnInfo).rawExt - } else if d.d.isBuiltinType(rtid) { - fn.f = (*decFnInfo).builtin - } else if xfTag, xfFn := d.h.getDecodeExt(rtid); xfFn != nil { - fi.xfTag, fi.xfFn = xfTag, xfFn - fn.f = (*decFnInfo).ext - } else if supportBinaryMarshal && fi.ti.unm { - fn.f = (*decFnInfo).binaryMarshal - } else { - switch rk := rt.Kind(); rk { - case reflect.String: - fn.f = (*decFnInfo).kString - case reflect.Bool: - fn.f = (*decFnInfo).kBool - case reflect.Int: - fn.f = (*decFnInfo).kInt - case reflect.Int64: - fn.f = (*decFnInfo).kInt64 - case reflect.Int32: - fn.f = (*decFnInfo).kInt32 - case reflect.Int8: - fn.f = (*decFnInfo).kInt8 - case reflect.Int16: - fn.f = (*decFnInfo).kInt16 - case reflect.Float32: - fn.f = (*decFnInfo).kFloat32 - case reflect.Float64: - fn.f = (*decFnInfo).kFloat64 - case reflect.Uint8: - fn.f = (*decFnInfo).kUint8 - case reflect.Uint64: - fn.f = (*decFnInfo).kUint64 - case reflect.Uint: - fn.f = (*decFnInfo).kUint - case reflect.Uint32: - fn.f = (*decFnInfo).kUint32 - case reflect.Uint16: - fn.f = (*decFnInfo).kUint16 - // case reflect.Ptr: - // fn.f = (*decFnInfo).kPtr - case reflect.Interface: - fn.f = (*decFnInfo).kInterface - case reflect.Struct: - fn.f = (*decFnInfo).kStruct - case reflect.Slice: - fn.f = (*decFnInfo).kSlice - case reflect.Array: - fi.array = true - fn.f = (*decFnInfo).kArray - case reflect.Map: - fn.f = (*decFnInfo).kMap - default: - fn.f = (*decFnInfo).kErr - } - } - if useMapForCodecCache { - if d.f == nil { - d.f = make(map[uintptr]decFn, 16) - } - d.f[rtid] = fn - } else { - d.s = append(d.s, fn) - d.x = append(d.x, rtid) - } - } - - fn.f(fn.i, rv) - - return -} - -func (d *Decoder) chkPtrValue(rv reflect.Value) { - // We can only decode into a non-nil pointer - if rv.Kind() == reflect.Ptr && !rv.IsNil() { - return - } - if !rv.IsValid() { - decErr("Cannot decode into a zero (ie invalid) reflect.Value") - } - if !rv.CanInterface() { - decErr("Cannot decode into a value without an interface: %v", rv) - } - rvi := rv.Interface() - decErr("Cannot decode into non-pointer or nil pointer. 
Got: %v, %T, %v", - rv.Kind(), rvi, rvi) -} - -func (d *Decoder) decEmbeddedField(rv reflect.Value, index []int) { - // d.decodeValue(rv.FieldByIndex(index)) - // nil pointers may be here; so reproduce FieldByIndex logic + enhancements - for _, j := range index { - if rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - // If a pointer, it must be a pointer to struct (based on typeInfo contract) - rv = rv.Elem() - } - rv = rv.Field(j) - } - d.decodeValue(rv) -} - -// -------------------------------------------------- - -// short circuit functions for common maps and slices - -func (d *Decoder) decSliceIntf(v *[]interface{}, currEncodedType valueType, doNotReset bool) { - _, containerLenS := decContLens(d.d, currEncodedType) - s := *v - if s == nil { - s = make([]interface{}, containerLenS, containerLenS) - } else if containerLenS > cap(s) { - if doNotReset { - decErr(msgDecCannotExpandArr, cap(s), containerLenS) - } - s = make([]interface{}, containerLenS, containerLenS) - copy(s, *v) - } else if containerLenS > len(s) { - s = s[:containerLenS] - } - for j := 0; j < containerLenS; j++ { - d.decode(&s[j]) - } - *v = s -} - -func (d *Decoder) decSliceInt64(v *[]int64, currEncodedType valueType, doNotReset bool) { - _, containerLenS := decContLens(d.d, currEncodedType) - s := *v - if s == nil { - s = make([]int64, containerLenS, containerLenS) - } else if containerLenS > cap(s) { - if doNotReset { - decErr(msgDecCannotExpandArr, cap(s), containerLenS) - } - s = make([]int64, containerLenS, containerLenS) - copy(s, *v) - } else if containerLenS > len(s) { - s = s[:containerLenS] - } - for j := 0; j < containerLenS; j++ { - // d.decode(&s[j]) - d.d.initReadNext() - s[j] = d.d.decodeInt(intBitsize) - } - *v = s -} - -func (d *Decoder) decSliceUint64(v *[]uint64, currEncodedType valueType, doNotReset bool) { - _, containerLenS := decContLens(d.d, currEncodedType) - s := *v - if s == nil { - s = make([]uint64, containerLenS, containerLenS) - } else if containerLenS > cap(s) { - if doNotReset { - decErr(msgDecCannotExpandArr, cap(s), containerLenS) - } - s = make([]uint64, containerLenS, containerLenS) - copy(s, *v) - } else if containerLenS > len(s) { - s = s[:containerLenS] - } - for j := 0; j < containerLenS; j++ { - // d.decode(&s[j]) - d.d.initReadNext() - s[j] = d.d.decodeUint(intBitsize) - } - *v = s -} - -func (d *Decoder) decSliceStr(v *[]string, currEncodedType valueType, doNotReset bool) { - _, containerLenS := decContLens(d.d, currEncodedType) - s := *v - if s == nil { - s = make([]string, containerLenS, containerLenS) - } else if containerLenS > cap(s) { - if doNotReset { - decErr(msgDecCannotExpandArr, cap(s), containerLenS) - } - s = make([]string, containerLenS, containerLenS) - copy(s, *v) - } else if containerLenS > len(s) { - s = s[:containerLenS] - } - for j := 0; j < containerLenS; j++ { - // d.decode(&s[j]) - d.d.initReadNext() - s[j] = d.d.decodeString() - } - *v = s -} - -func (d *Decoder) decMapIntfIntf(v *map[interface{}]interface{}) { - containerLen := d.d.readMapLen() - m := *v - if m == nil { - m = make(map[interface{}]interface{}, containerLen) - *v = m - } - for j := 0; j < containerLen; j++ { - var mk interface{} - d.decode(&mk) - // special case if a byte array. 
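- // ([]byte is not a comparable type, so it cannot serve as a map key;
- // converting to string makes the key usable and hashable.)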
- if bv, bok := mk.([]byte); bok { - mk = string(bv) - } - mv := m[mk] - d.decode(&mv) - m[mk] = mv - } -} - -func (d *Decoder) decMapInt64Intf(v *map[int64]interface{}) { - containerLen := d.d.readMapLen() - m := *v - if m == nil { - m = make(map[int64]interface{}, containerLen) - *v = m - } - for j := 0; j < containerLen; j++ { - d.d.initReadNext() - mk := d.d.decodeInt(intBitsize) - mv := m[mk] - d.decode(&mv) - m[mk] = mv - } -} - -func (d *Decoder) decMapUint64Intf(v *map[uint64]interface{}) { - containerLen := d.d.readMapLen() - m := *v - if m == nil { - m = make(map[uint64]interface{}, containerLen) - *v = m - } - for j := 0; j < containerLen; j++ { - d.d.initReadNext() - mk := d.d.decodeUint(intBitsize) - mv := m[mk] - d.decode(&mv) - m[mk] = mv - } -} - -func (d *Decoder) decMapStrIntf(v *map[string]interface{}) { - containerLen := d.d.readMapLen() - m := *v - if m == nil { - m = make(map[string]interface{}, containerLen) - *v = m - } - for j := 0; j < containerLen; j++ { - d.d.initReadNext() - mk := d.d.decodeString() - mv := m[mk] - d.decode(&mv) - m[mk] = mv - } -} - -// ---------------------------------------- - -func decContLens(dd decDriver, currEncodedType valueType) (containerLen, containerLenS int) { - if currEncodedType == valueTypeInvalid { - currEncodedType = dd.currentEncodedType() - } - switch currEncodedType { - case valueTypeArray: - containerLen = dd.readArrayLen() - containerLenS = containerLen - case valueTypeMap: - containerLen = dd.readMapLen() - containerLenS = containerLen * 2 - default: - decErr("Only encoded map or array can be decoded into a slice. (valueType: %0x)", - currEncodedType) - } - return -} - -func decErr(format string, params ...interface{}) { - doPanic(msgTagDec, format, params...) -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/encode.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/encode.go deleted file mode 100644 index 4914be0c7..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/encode.go +++ /dev/null @@ -1,1001 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -import ( - "io" - "reflect" -) - -const ( - // Some tagging information for error messages. - msgTagEnc = "codec.encoder" - defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024 - // maxTimeSecs32 = math.MaxInt32 / 60 / 24 / 366 -) - -// AsSymbolFlag defines what should be encoded as symbols. -type AsSymbolFlag uint8 - -const ( - // AsSymbolDefault is default. - // Currently, this means only encode struct field names as symbols. - // The default is subject to change. - AsSymbolDefault AsSymbolFlag = iota - - // AsSymbolAll means encode anything which could be a symbol as a symbol. - AsSymbolAll = 0xfe - - // AsSymbolNone means do not encode anything as a symbol. - AsSymbolNone = 1 << iota - - // AsSymbolMapStringKeys means encode keys in map[string]XXX as symbols. - AsSymbolMapStringKeysFlag - - // AsSymbolStructFieldName means encode struct field names as symbols. - AsSymbolStructFieldNameFlag -) - -// encWriter abstracting writing to a byte array or to an io.Writer. 
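-// Two implementations follow: ioEncWriter (wrapping an io.Writer) and
-// bytesEncWriter (appending directly to a byte slice).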
-type encWriter interface { - writeUint16(uint16) - writeUint32(uint32) - writeUint64(uint64) - writeb([]byte) - writestr(string) - writen1(byte) - writen2(byte, byte) - atEndOfEncode() -} - -// encDriver abstracts the actual codec (binc vs msgpack, etc) -type encDriver interface { - isBuiltinType(rt uintptr) bool - encodeBuiltin(rt uintptr, v interface{}) - encodeNil() - encodeInt(i int64) - encodeUint(i uint64) - encodeBool(b bool) - encodeFloat32(f float32) - encodeFloat64(f float64) - encodeExtPreamble(xtag byte, length int) - encodeArrayPreamble(length int) - encodeMapPreamble(length int) - encodeString(c charEncoding, v string) - encodeSymbol(v string) - encodeStringBytes(c charEncoding, v []byte) - //TODO - //encBignum(f *big.Int) - //encStringRunes(c charEncoding, v []rune) -} - -type ioEncWriterWriter interface { - WriteByte(c byte) error - WriteString(s string) (n int, err error) - Write(p []byte) (n int, err error) -} - -type ioEncStringWriter interface { - WriteString(s string) (n int, err error) -} - -type EncodeOptions struct { - // Encode a struct as an array, and not as a map. - StructToArray bool - - // AsSymbols defines what should be encoded as symbols. - // - // Encoding as symbols can reduce the encoded size significantly. - // - // However, during decoding, each string to be encoded as a symbol must - // be checked to see if it has been seen before. Consequently, encoding time - // will increase if using symbols, because string comparisons has a clear cost. - // - // Sample values: - // AsSymbolNone - // AsSymbolAll - // AsSymbolMapStringKeys - // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag - AsSymbols AsSymbolFlag -} - -// --------------------------------------------- - -type simpleIoEncWriterWriter struct { - w io.Writer - bw io.ByteWriter - sw ioEncStringWriter -} - -func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) { - if o.bw != nil { - return o.bw.WriteByte(c) - } - _, err = o.w.Write([]byte{c}) - return -} - -func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) { - if o.sw != nil { - return o.sw.WriteString(s) - } - return o.w.Write([]byte(s)) -} - -func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) { - return o.w.Write(p) -} - -// ---------------------------------------- - -// ioEncWriter implements encWriter and can write to an io.Writer implementation -type ioEncWriter struct { - w ioEncWriterWriter - x [8]byte // temp byte array re-used internally for efficiency -} - -func (z *ioEncWriter) writeUint16(v uint16) { - bigen.PutUint16(z.x[:2], v) - z.writeb(z.x[:2]) -} - -func (z *ioEncWriter) writeUint32(v uint32) { - bigen.PutUint32(z.x[:4], v) - z.writeb(z.x[:4]) -} - -func (z *ioEncWriter) writeUint64(v uint64) { - bigen.PutUint64(z.x[:8], v) - z.writeb(z.x[:8]) -} - -func (z *ioEncWriter) writeb(bs []byte) { - if len(bs) == 0 { - return - } - n, err := z.w.Write(bs) - if err != nil { - panic(err) - } - if n != len(bs) { - encErr("write: Incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n) - } -} - -func (z *ioEncWriter) writestr(s string) { - n, err := z.w.WriteString(s) - if err != nil { - panic(err) - } - if n != len(s) { - encErr("write: Incorrect num bytes written. 
Expecting: %v, Wrote: %v", len(s), n) - } -} - -func (z *ioEncWriter) writen1(b byte) { - if err := z.w.WriteByte(b); err != nil { - panic(err) - } -} - -func (z *ioEncWriter) writen2(b1 byte, b2 byte) { - z.writen1(b1) - z.writen1(b2) -} - -func (z *ioEncWriter) atEndOfEncode() {} - -// ---------------------------------------- - -// bytesEncWriter implements encWriter and can write to an byte slice. -// It is used by Marshal function. -type bytesEncWriter struct { - b []byte - c int // cursor - out *[]byte // write out on atEndOfEncode -} - -func (z *bytesEncWriter) writeUint16(v uint16) { - c := z.grow(2) - z.b[c] = byte(v >> 8) - z.b[c+1] = byte(v) -} - -func (z *bytesEncWriter) writeUint32(v uint32) { - c := z.grow(4) - z.b[c] = byte(v >> 24) - z.b[c+1] = byte(v >> 16) - z.b[c+2] = byte(v >> 8) - z.b[c+3] = byte(v) -} - -func (z *bytesEncWriter) writeUint64(v uint64) { - c := z.grow(8) - z.b[c] = byte(v >> 56) - z.b[c+1] = byte(v >> 48) - z.b[c+2] = byte(v >> 40) - z.b[c+3] = byte(v >> 32) - z.b[c+4] = byte(v >> 24) - z.b[c+5] = byte(v >> 16) - z.b[c+6] = byte(v >> 8) - z.b[c+7] = byte(v) -} - -func (z *bytesEncWriter) writeb(s []byte) { - if len(s) == 0 { - return - } - c := z.grow(len(s)) - copy(z.b[c:], s) -} - -func (z *bytesEncWriter) writestr(s string) { - c := z.grow(len(s)) - copy(z.b[c:], s) -} - -func (z *bytesEncWriter) writen1(b1 byte) { - c := z.grow(1) - z.b[c] = b1 -} - -func (z *bytesEncWriter) writen2(b1 byte, b2 byte) { - c := z.grow(2) - z.b[c] = b1 - z.b[c+1] = b2 -} - -func (z *bytesEncWriter) atEndOfEncode() { - *(z.out) = z.b[:z.c] -} - -func (z *bytesEncWriter) grow(n int) (oldcursor int) { - oldcursor = z.c - z.c = oldcursor + n - if z.c > cap(z.b) { - // Tried using appendslice logic: (if cap < 1024, *2, else *1.25). - // However, it was too expensive, causing too many iterations of copy. 
- // Using bytes.Buffer model was much better (2*cap + n) - bs := make([]byte, 2*cap(z.b)+n) - copy(bs, z.b[:oldcursor]) - z.b = bs - } else if z.c > len(z.b) { - z.b = z.b[:cap(z.b)] - } - return -} - -// --------------------------------------------- - -type encFnInfo struct { - ti *typeInfo - e *Encoder - ee encDriver - xfFn func(reflect.Value) ([]byte, error) - xfTag byte -} - -func (f *encFnInfo) builtin(rv reflect.Value) { - f.ee.encodeBuiltin(f.ti.rtid, rv.Interface()) -} - -func (f *encFnInfo) rawExt(rv reflect.Value) { - f.e.encRawExt(rv.Interface().(RawExt)) -} - -func (f *encFnInfo) ext(rv reflect.Value) { - bs, fnerr := f.xfFn(rv) - if fnerr != nil { - panic(fnerr) - } - if bs == nil { - f.ee.encodeNil() - return - } - if f.e.hh.writeExt() { - f.ee.encodeExtPreamble(f.xfTag, len(bs)) - f.e.w.writeb(bs) - } else { - f.ee.encodeStringBytes(c_RAW, bs) - } - -} - -func (f *encFnInfo) binaryMarshal(rv reflect.Value) { - var bm binaryMarshaler - if f.ti.mIndir == 0 { - bm = rv.Interface().(binaryMarshaler) - } else if f.ti.mIndir == -1 { - bm = rv.Addr().Interface().(binaryMarshaler) - } else { - for j, k := int8(0), f.ti.mIndir; j < k; j++ { - if rv.IsNil() { - f.ee.encodeNil() - return - } - rv = rv.Elem() - } - bm = rv.Interface().(binaryMarshaler) - } - // debugf(">>>> binaryMarshaler: %T", rv.Interface()) - bs, fnerr := bm.MarshalBinary() - if fnerr != nil { - panic(fnerr) - } - if bs == nil { - f.ee.encodeNil() - } else { - f.ee.encodeStringBytes(c_RAW, bs) - } -} - -func (f *encFnInfo) kBool(rv reflect.Value) { - f.ee.encodeBool(rv.Bool()) -} - -func (f *encFnInfo) kString(rv reflect.Value) { - f.ee.encodeString(c_UTF8, rv.String()) -} - -func (f *encFnInfo) kFloat64(rv reflect.Value) { - f.ee.encodeFloat64(rv.Float()) -} - -func (f *encFnInfo) kFloat32(rv reflect.Value) { - f.ee.encodeFloat32(float32(rv.Float())) -} - -func (f *encFnInfo) kInt(rv reflect.Value) { - f.ee.encodeInt(rv.Int()) -} - -func (f *encFnInfo) kUint(rv reflect.Value) { - f.ee.encodeUint(rv.Uint()) -} - -func (f *encFnInfo) kInvalid(rv reflect.Value) { - f.ee.encodeNil() -} - -func (f *encFnInfo) kErr(rv reflect.Value) { - encErr("Unsupported kind: %s, for: %#v", rv.Kind(), rv) -} - -func (f *encFnInfo) kSlice(rv reflect.Value) { - if rv.IsNil() { - f.ee.encodeNil() - return - } - - if shortCircuitReflectToFastPath { - switch f.ti.rtid { - case intfSliceTypId: - f.e.encSliceIntf(rv.Interface().([]interface{})) - return - case strSliceTypId: - f.e.encSliceStr(rv.Interface().([]string)) - return - case uint64SliceTypId: - f.e.encSliceUint64(rv.Interface().([]uint64)) - return - case int64SliceTypId: - f.e.encSliceInt64(rv.Interface().([]int64)) - return - } - } - - // If in this method, then there was no extension function defined. - // So it's okay to treat as []byte. - if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 { - f.ee.encodeStringBytes(c_RAW, rv.Bytes()) - return - } - - l := rv.Len() - if f.ti.mbs { - if l%2 == 1 { - encErr("mapBySlice: invalid length (must be divisible by 2): %v", l) - } - f.ee.encodeMapPreamble(l / 2) - } else { - f.ee.encodeArrayPreamble(l) - } - if l == 0 { - return - } - for j := 0; j < l; j++ { - // TODO: Consider perf implication of encoding odd index values as symbols if type is string - f.e.encodeValue(rv.Index(j)) - } -} - -func (f *encFnInfo) kArray(rv reflect.Value) { - // We cannot share kSlice method, because the array may be non-addressable. - // E.g. type struct S{B [2]byte}; Encode(S{}) will bomb on "panic: slice of unaddressable array". 
- // So we have to duplicate the functionality here. - // f.e.encodeValue(rv.Slice(0, rv.Len())) - // f.kSlice(rv.Slice(0, rv.Len())) - - l := rv.Len() - // Handle an array of bytes specially (in line with what is done for slices) - if f.ti.rt.Elem().Kind() == reflect.Uint8 { - if l == 0 { - f.ee.encodeStringBytes(c_RAW, nil) - return - } - var bs []byte - if rv.CanAddr() { - bs = rv.Slice(0, l).Bytes() - } else { - bs = make([]byte, l) - for i := 0; i < l; i++ { - bs[i] = byte(rv.Index(i).Uint()) - } - } - f.ee.encodeStringBytes(c_RAW, bs) - return - } - - if f.ti.mbs { - if l%2 == 1 { - encErr("mapBySlice: invalid length (must be divisible by 2): %v", l) - } - f.ee.encodeMapPreamble(l / 2) - } else { - f.ee.encodeArrayPreamble(l) - } - if l == 0 { - return - } - for j := 0; j < l; j++ { - // TODO: Consider perf implication of encoding odd index values as symbols if type is string - f.e.encodeValue(rv.Index(j)) - } -} - -func (f *encFnInfo) kStruct(rv reflect.Value) { - fti := f.ti - newlen := len(fti.sfi) - rvals := make([]reflect.Value, newlen) - var encnames []string - e := f.e - tisfi := fti.sfip - toMap := !(fti.toArray || e.h.StructToArray) - // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct) - if toMap { - tisfi = fti.sfi - encnames = make([]string, newlen) - } - newlen = 0 - for _, si := range tisfi { - if si.i != -1 { - rvals[newlen] = rv.Field(int(si.i)) - } else { - rvals[newlen] = rv.FieldByIndex(si.is) - } - if toMap { - if si.omitEmpty && isEmptyValue(rvals[newlen]) { - continue - } - encnames[newlen] = si.encName - } else { - if si.omitEmpty && isEmptyValue(rvals[newlen]) { - rvals[newlen] = reflect.Value{} //encode as nil - } - } - newlen++ - } - - // debugf(">>>> kStruct: newlen: %v", newlen) - if toMap { - ee := f.ee //don't dereference everytime - ee.encodeMapPreamble(newlen) - // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 - asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 - for j := 0; j < newlen; j++ { - if asSymbols { - ee.encodeSymbol(encnames[j]) - } else { - ee.encodeString(c_UTF8, encnames[j]) - } - e.encodeValue(rvals[j]) - } - } else { - f.ee.encodeArrayPreamble(newlen) - for j := 0; j < newlen; j++ { - e.encodeValue(rvals[j]) - } - } -} - -// func (f *encFnInfo) kPtr(rv reflect.Value) { -// debugf(">>>>>>> ??? 
encode kPtr called - shouldn't get called") -// if rv.IsNil() { -// f.ee.encodeNil() -// return -// } -// f.e.encodeValue(rv.Elem()) -// } - -func (f *encFnInfo) kInterface(rv reflect.Value) { - if rv.IsNil() { - f.ee.encodeNil() - return - } - f.e.encodeValue(rv.Elem()) -} - -func (f *encFnInfo) kMap(rv reflect.Value) { - if rv.IsNil() { - f.ee.encodeNil() - return - } - - if shortCircuitReflectToFastPath { - switch f.ti.rtid { - case mapIntfIntfTypId: - f.e.encMapIntfIntf(rv.Interface().(map[interface{}]interface{})) - return - case mapStrIntfTypId: - f.e.encMapStrIntf(rv.Interface().(map[string]interface{})) - return - case mapStrStrTypId: - f.e.encMapStrStr(rv.Interface().(map[string]string)) - return - case mapInt64IntfTypId: - f.e.encMapInt64Intf(rv.Interface().(map[int64]interface{})) - return - case mapUint64IntfTypId: - f.e.encMapUint64Intf(rv.Interface().(map[uint64]interface{})) - return - } - } - - l := rv.Len() - f.ee.encodeMapPreamble(l) - if l == 0 { - return - } - // keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String - keyTypeIsString := f.ti.rt.Key() == stringTyp - var asSymbols bool - if keyTypeIsString { - asSymbols = f.e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - } - mks := rv.MapKeys() - // for j, lmks := 0, len(mks); j < lmks; j++ { - for j := range mks { - if keyTypeIsString { - if asSymbols { - f.ee.encodeSymbol(mks[j].String()) - } else { - f.ee.encodeString(c_UTF8, mks[j].String()) - } - } else { - f.e.encodeValue(mks[j]) - } - f.e.encodeValue(rv.MapIndex(mks[j])) - } - -} - -// -------------------------------------------------- - -// encFn encapsulates the captured variables and the encode function. -// This way, we only do some calculations one times, and pass to the -// code block that should be called (encapsulated in a function) -// instead of executing the checks every time. -type encFn struct { - i *encFnInfo - f func(*encFnInfo, reflect.Value) -} - -// -------------------------------------------------- - -// An Encoder writes an object to an output stream in the codec format. -type Encoder struct { - w encWriter - e encDriver - h *BasicHandle - hh Handle - f map[uintptr]encFn - x []uintptr - s []encFn -} - -// NewEncoder returns an Encoder for encoding into an io.Writer. -// -// For efficiency, Users are encouraged to pass in a memory buffered writer -// (eg bufio.Writer, bytes.Buffer). -func NewEncoder(w io.Writer, h Handle) *Encoder { - ww, ok := w.(ioEncWriterWriter) - if !ok { - sww := simpleIoEncWriterWriter{w: w} - sww.bw, _ = w.(io.ByteWriter) - sww.sw, _ = w.(ioEncStringWriter) - ww = &sww - //ww = bufio.NewWriterSize(w, defEncByteBufSize) - } - z := ioEncWriter{ - w: ww, - } - return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)} -} - -// NewEncoderBytes returns an encoder for encoding directly and efficiently -// into a byte slice, using zero-copying to temporary slices. -// -// It will potentially replace the output byte slice pointed to. -// After encoding, the out parameter contains the encoded contents. -func NewEncoderBytes(out *[]byte, h Handle) *Encoder { - in := *out - if in == nil { - in = make([]byte, defEncByteBufSize) - } - z := bytesEncWriter{ - b: in, - out: out, - } - return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)} -} - -// Encode writes an object into a stream in the codec format. -// -// Encoding can be configured via the "codec" struct tag for the fields. -// -// The "codec" key in struct field's tag value is the key name, -// followed by an optional comma and options. 
-// -// To set an option on all fields (e.g. omitempty on all fields), you -// can create a field called _struct, and set flags on it. -// -// Struct values "usually" encode as maps. Each exported struct field is encoded unless: -// - the field's codec tag is "-", OR -// - the field is empty and its codec tag specifies the "omitempty" option. -// -// When encoding as a map, the first string in the tag (before the comma) -// is the map key string to use when encoding. -// -// However, struct values may encode as arrays. This happens when: -// - StructToArray Encode option is set, OR -// - the codec tag on the _struct field sets the "toarray" option -// -// Values with types that implement MapBySlice are encoded as stream maps. -// -// The empty values (for omitempty option) are false, 0, any nil pointer -// or interface value, and any array, slice, map, or string of length zero. -// -// Anonymous fields are encoded inline if no struct tag is present. -// Else they are encoded as regular fields. -// -// Examples: -// -// type MyStruct struct { -// _struct bool `codec:",omitempty"` //set omitempty for every field -// Field1 string `codec:"-"` //skip this field -// Field2 int `codec:"myName"` //Use key "myName" in encode stream -// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty. -// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty. -// ... -// } -// -// type MyStruct struct { -// _struct bool `codec:",omitempty,toarray"` //set omitempty for every field -// //and encode struct as an array -// } -// -// The mode of encoding is based on the type of the value. When a value is seen: -// - If an extension is registered for it, call that extension function -// - If it implements BinaryMarshaler, call its MarshalBinary() (data []byte, err error) -// - Else encode it based on its reflect.Kind -// -// Note that struct field names and keys in map[string]XXX will be treated as symbols. -// Some formats support symbols (e.g. binc) and will properly encode the string -// only once in the stream, and use a tag to refer to it thereafter. 
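// Editor's sketch, not part of the original file: a minimal, hypothetical use
// of the tag semantics described above. The Address type and its fields are
// assumptions; NewEncoderBytes (above) and MsgpackHandle (defined later in
// this diff) are the package's own API.
//
//   type Address struct {
//       _struct bool   `codec:",omitempty"` // set omitempty for every field
//       Street  string `codec:"street"`     // encoded under key "street"
//       Zip     string `codec:",omitempty"` // key "Zip"; omitted if empty
//       Temp    string `codec:"-"`          // never encoded
//   }
//
//   var out []byte
//   enc := NewEncoderBytes(&out, &MsgpackHandle{})
//   err := enc.Encode(Address{Street: "main st"})
//   // On success, out holds a msgpack map with the single key "street";
//   // Zip and Temp are absent. err is non-nil if encoding fails.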
-func (e *Encoder) Encode(v interface{}) (err error) { - defer panicToErr(&err) - e.encode(v) - e.w.atEndOfEncode() - return -} - -func (e *Encoder) encode(iv interface{}) { - switch v := iv.(type) { - case nil: - e.e.encodeNil() - - case reflect.Value: - e.encodeValue(v) - - case string: - e.e.encodeString(c_UTF8, v) - case bool: - e.e.encodeBool(v) - case int: - e.e.encodeInt(int64(v)) - case int8: - e.e.encodeInt(int64(v)) - case int16: - e.e.encodeInt(int64(v)) - case int32: - e.e.encodeInt(int64(v)) - case int64: - e.e.encodeInt(v) - case uint: - e.e.encodeUint(uint64(v)) - case uint8: - e.e.encodeUint(uint64(v)) - case uint16: - e.e.encodeUint(uint64(v)) - case uint32: - e.e.encodeUint(uint64(v)) - case uint64: - e.e.encodeUint(v) - case float32: - e.e.encodeFloat32(v) - case float64: - e.e.encodeFloat64(v) - - case []interface{}: - e.encSliceIntf(v) - case []string: - e.encSliceStr(v) - case []int64: - e.encSliceInt64(v) - case []uint64: - e.encSliceUint64(v) - case []uint8: - e.e.encodeStringBytes(c_RAW, v) - - case map[interface{}]interface{}: - e.encMapIntfIntf(v) - case map[string]interface{}: - e.encMapStrIntf(v) - case map[string]string: - e.encMapStrStr(v) - case map[int64]interface{}: - e.encMapInt64Intf(v) - case map[uint64]interface{}: - e.encMapUint64Intf(v) - - case *string: - e.e.encodeString(c_UTF8, *v) - case *bool: - e.e.encodeBool(*v) - case *int: - e.e.encodeInt(int64(*v)) - case *int8: - e.e.encodeInt(int64(*v)) - case *int16: - e.e.encodeInt(int64(*v)) - case *int32: - e.e.encodeInt(int64(*v)) - case *int64: - e.e.encodeInt(*v) - case *uint: - e.e.encodeUint(uint64(*v)) - case *uint8: - e.e.encodeUint(uint64(*v)) - case *uint16: - e.e.encodeUint(uint64(*v)) - case *uint32: - e.e.encodeUint(uint64(*v)) - case *uint64: - e.e.encodeUint(*v) - case *float32: - e.e.encodeFloat32(*v) - case *float64: - e.e.encodeFloat64(*v) - - case *[]interface{}: - e.encSliceIntf(*v) - case *[]string: - e.encSliceStr(*v) - case *[]int64: - e.encSliceInt64(*v) - case *[]uint64: - e.encSliceUint64(*v) - case *[]uint8: - e.e.encodeStringBytes(c_RAW, *v) - - case *map[interface{}]interface{}: - e.encMapIntfIntf(*v) - case *map[string]interface{}: - e.encMapStrIntf(*v) - case *map[string]string: - e.encMapStrStr(*v) - case *map[int64]interface{}: - e.encMapInt64Intf(*v) - case *map[uint64]interface{}: - e.encMapUint64Intf(*v) - - default: - e.encodeValue(reflect.ValueOf(iv)) - } -} - -func (e *Encoder) encodeValue(rv reflect.Value) { - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - e.e.encodeNil() - return - } - rv = rv.Elem() - } - - rt := rv.Type() - rtid := reflect.ValueOf(rt).Pointer() - - // if e.f == nil && e.s == nil { debugf("---->Creating new enc f map for type: %v\n", rt) } - var fn encFn - var ok bool - if useMapForCodecCache { - fn, ok = e.f[rtid] - } else { - for i, v := range e.x { - if v == rtid { - fn, ok = e.s[i], true - break - } - } - } - if !ok { - // debugf("\tCreating new enc fn for type: %v\n", rt) - fi := encFnInfo{ti: getTypeInfo(rtid, rt), e: e, ee: e.e} - fn.i = &fi - if rtid == rawExtTypId { - fn.f = (*encFnInfo).rawExt - } else if e.e.isBuiltinType(rtid) { - fn.f = (*encFnInfo).builtin - } else if xfTag, xfFn := e.h.getEncodeExt(rtid); xfFn != nil { - fi.xfTag, fi.xfFn = xfTag, xfFn - fn.f = (*encFnInfo).ext - } else if supportBinaryMarshal && fi.ti.m { - fn.f = (*encFnInfo).binaryMarshal - } else { - switch rk := rt.Kind(); rk { - case reflect.Bool: - fn.f = (*encFnInfo).kBool - case reflect.String: - fn.f = (*encFnInfo).kString - case reflect.Float64: - 
fn.f = (*encFnInfo).kFloat64 - case reflect.Float32: - fn.f = (*encFnInfo).kFloat32 - case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16: - fn.f = (*encFnInfo).kInt - case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16: - fn.f = (*encFnInfo).kUint - case reflect.Invalid: - fn.f = (*encFnInfo).kInvalid - case reflect.Slice: - fn.f = (*encFnInfo).kSlice - case reflect.Array: - fn.f = (*encFnInfo).kArray - case reflect.Struct: - fn.f = (*encFnInfo).kStruct - // case reflect.Ptr: - // fn.f = (*encFnInfo).kPtr - case reflect.Interface: - fn.f = (*encFnInfo).kInterface - case reflect.Map: - fn.f = (*encFnInfo).kMap - default: - fn.f = (*encFnInfo).kErr - } - } - if useMapForCodecCache { - if e.f == nil { - e.f = make(map[uintptr]encFn, 16) - } - e.f[rtid] = fn - } else { - e.s = append(e.s, fn) - e.x = append(e.x, rtid) - } - } - - fn.f(fn.i, rv) - -} - -func (e *Encoder) encRawExt(re RawExt) { - if re.Data == nil { - e.e.encodeNil() - return - } - if e.hh.writeExt() { - e.e.encodeExtPreamble(re.Tag, len(re.Data)) - e.w.writeb(re.Data) - } else { - e.e.encodeStringBytes(c_RAW, re.Data) - } -} - -// --------------------------------------------- -// short circuit functions for common maps and slices - -func (e *Encoder) encSliceIntf(v []interface{}) { - e.e.encodeArrayPreamble(len(v)) - for _, v2 := range v { - e.encode(v2) - } -} - -func (e *Encoder) encSliceStr(v []string) { - e.e.encodeArrayPreamble(len(v)) - for _, v2 := range v { - e.e.encodeString(c_UTF8, v2) - } -} - -func (e *Encoder) encSliceInt64(v []int64) { - e.e.encodeArrayPreamble(len(v)) - for _, v2 := range v { - e.e.encodeInt(v2) - } -} - -func (e *Encoder) encSliceUint64(v []uint64) { - e.e.encodeArrayPreamble(len(v)) - for _, v2 := range v { - e.e.encodeUint(v2) - } -} - -func (e *Encoder) encMapStrStr(v map[string]string) { - e.e.encodeMapPreamble(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - for k2, v2 := range v { - if asSymbols { - e.e.encodeSymbol(k2) - } else { - e.e.encodeString(c_UTF8, k2) - } - e.e.encodeString(c_UTF8, v2) - } -} - -func (e *Encoder) encMapStrIntf(v map[string]interface{}) { - e.e.encodeMapPreamble(len(v)) - asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - for k2, v2 := range v { - if asSymbols { - e.e.encodeSymbol(k2) - } else { - e.e.encodeString(c_UTF8, k2) - } - e.encode(v2) - } -} - -func (e *Encoder) encMapInt64Intf(v map[int64]interface{}) { - e.e.encodeMapPreamble(len(v)) - for k2, v2 := range v { - e.e.encodeInt(k2) - e.encode(v2) - } -} - -func (e *Encoder) encMapUint64Intf(v map[uint64]interface{}) { - e.e.encodeMapPreamble(len(v)) - for k2, v2 := range v { - e.e.encodeUint(uint64(k2)) - e.encode(v2) - } -} - -func (e *Encoder) encMapIntfIntf(v map[interface{}]interface{}) { - e.e.encodeMapPreamble(len(v)) - for k2, v2 := range v { - e.encode(k2) - e.encode(v2) - } -} - -// ---------------------------------------- - -func encErr(format string, params ...interface{}) { - doPanic(msgTagEnc, format, params...) -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/ext_dep_test.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/ext_dep_test.go deleted file mode 100644 index bdf448d52..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/ext_dep_test.go +++ /dev/null @@ -1,75 +0,0 @@ -// //+build ignore - -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. 
-// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -// This file includes benchmarks which have dependencies on 3rdparty -// packages (bson and vmihailenco/msgpack) which must be installed locally. -// -// To run the benchmarks including these 3rdparty packages, first -// - Uncomment first line in this file (put // // in front of it) -// - Get those packages: -// go get github.com/vmihailenco/msgpack -// go get labix.org/v2/mgo/bson -// - Run: -// go test -bi -bench=. - -import ( - "testing" - - vmsgpack "gopkg.in/vmihailenco/msgpack.v2" - "labix.org/v2/mgo/bson" -) - -func init() { - benchCheckers = append(benchCheckers, - benchChecker{"v-msgpack", fnVMsgpackEncodeFn, fnVMsgpackDecodeFn}, - benchChecker{"bson", fnBsonEncodeFn, fnBsonDecodeFn}, - ) -} - -func fnVMsgpackEncodeFn(ts interface{}) ([]byte, error) { - return vmsgpack.Marshal(ts) -} - -func fnVMsgpackDecodeFn(buf []byte, ts interface{}) error { - return vmsgpack.Unmarshal(buf, ts) -} - -func fnBsonEncodeFn(ts interface{}) ([]byte, error) { - return bson.Marshal(ts) -} - -func fnBsonDecodeFn(buf []byte, ts interface{}) error { - return bson.Unmarshal(buf, ts) -} - -func Benchmark__Bson_______Encode(b *testing.B) { - fnBenchmarkEncode(b, "bson", benchTs, fnBsonEncodeFn) -} - -func Benchmark__Bson_______Decode(b *testing.B) { - fnBenchmarkDecode(b, "bson", benchTs, fnBsonEncodeFn, fnBsonDecodeFn, fnBenchNewTs) -} - -func Benchmark__VMsgpack___Encode(b *testing.B) { - fnBenchmarkEncode(b, "v-msgpack", benchTs, fnVMsgpackEncodeFn) -} - -func Benchmark__VMsgpack___Decode(b *testing.B) { - fnBenchmarkDecode(b, "v-msgpack", benchTs, fnVMsgpackEncodeFn, fnVMsgpackDecodeFn, fnBenchNewTs) -} - -func TestMsgpackPythonGenStreams(t *testing.T) { - doTestMsgpackPythonGenStreams(t) -} - -func TestMsgpackRpcSpecGoClientToPythonSvc(t *testing.T) { - doTestMsgpackRpcSpecGoClientToPythonSvc(t) -} - -func TestMsgpackRpcSpecPythonClientToGoSvc(t *testing.T) { - doTestMsgpackRpcSpecPythonClientToGoSvc(t) -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/helper.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/helper.go deleted file mode 100644 index e6dc0563f..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/helper.go +++ /dev/null @@ -1,589 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -// Contains code shared by both encode and decode. - -import ( - "encoding/binary" - "fmt" - "math" - "reflect" - "sort" - "strings" - "sync" - "time" - "unicode" - "unicode/utf8" -) - -const ( - structTagName = "codec" - - // Support - // encoding.BinaryMarshaler: MarshalBinary() (data []byte, err error) - // encoding.BinaryUnmarshaler: UnmarshalBinary(data []byte) error - // This constant flag will enable or disable it. - supportBinaryMarshal = true - - // Each Encoder or Decoder uses a cache of functions based on conditionals, - // so that the conditionals are not run every time. - // - // Either a map or a slice is used to keep track of the functions. - // The map is more natural, but has a higher cost than a slice/array. - // This flag (useMapForCodecCache) controls which is used. - useMapForCodecCache = false - - // For some common container types, we can short-circuit an elaborate - // reflection dance and call encode/decode directly. 
- // The currently supported types are: - // - slices of strings, or id's (int64,uint64) or interfaces. - // - maps of str->str, str->intf, id(int64,uint64)->intf, intf->intf - shortCircuitReflectToFastPath = true - - // for debugging, set this to false, to catch panic traces. - // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. - recoverPanicToErr = true -) - -type charEncoding uint8 - -const ( - c_RAW charEncoding = iota - c_UTF8 - c_UTF16LE - c_UTF16BE - c_UTF32LE - c_UTF32BE -) - -// valueType is the stream type -type valueType uint8 - -const ( - valueTypeUnset valueType = iota - valueTypeNil - valueTypeInt - valueTypeUint - valueTypeFloat - valueTypeBool - valueTypeString - valueTypeSymbol - valueTypeBytes - valueTypeMap - valueTypeArray - valueTypeTimestamp - valueTypeExt - - valueTypeInvalid = 0xff -) - -var ( - bigen = binary.BigEndian - structInfoFieldName = "_struct" - - cachedTypeInfo = make(map[uintptr]*typeInfo, 4) - cachedTypeInfoMutex sync.RWMutex - - intfSliceTyp = reflect.TypeOf([]interface{}(nil)) - intfTyp = intfSliceTyp.Elem() - - strSliceTyp = reflect.TypeOf([]string(nil)) - boolSliceTyp = reflect.TypeOf([]bool(nil)) - uintSliceTyp = reflect.TypeOf([]uint(nil)) - uint8SliceTyp = reflect.TypeOf([]uint8(nil)) - uint16SliceTyp = reflect.TypeOf([]uint16(nil)) - uint32SliceTyp = reflect.TypeOf([]uint32(nil)) - uint64SliceTyp = reflect.TypeOf([]uint64(nil)) - intSliceTyp = reflect.TypeOf([]int(nil)) - int8SliceTyp = reflect.TypeOf([]int8(nil)) - int16SliceTyp = reflect.TypeOf([]int16(nil)) - int32SliceTyp = reflect.TypeOf([]int32(nil)) - int64SliceTyp = reflect.TypeOf([]int64(nil)) - float32SliceTyp = reflect.TypeOf([]float32(nil)) - float64SliceTyp = reflect.TypeOf([]float64(nil)) - - mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) - mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) - mapStrStrTyp = reflect.TypeOf(map[string]string(nil)) - - mapIntIntfTyp = reflect.TypeOf(map[int]interface{}(nil)) - mapInt64IntfTyp = reflect.TypeOf(map[int64]interface{}(nil)) - mapUintIntfTyp = reflect.TypeOf(map[uint]interface{}(nil)) - mapUint64IntfTyp = reflect.TypeOf(map[uint64]interface{}(nil)) - - stringTyp = reflect.TypeOf("") - timeTyp = reflect.TypeOf(time.Time{}) - rawExtTyp = reflect.TypeOf(RawExt{}) - - mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() - binaryMarshalerTyp = reflect.TypeOf((*binaryMarshaler)(nil)).Elem() - binaryUnmarshalerTyp = reflect.TypeOf((*binaryUnmarshaler)(nil)).Elem() - - rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer() - intfTypId = reflect.ValueOf(intfTyp).Pointer() - timeTypId = reflect.ValueOf(timeTyp).Pointer() - - intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer() - strSliceTypId = reflect.ValueOf(strSliceTyp).Pointer() - - boolSliceTypId = reflect.ValueOf(boolSliceTyp).Pointer() - uintSliceTypId = reflect.ValueOf(uintSliceTyp).Pointer() - uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer() - uint16SliceTypId = reflect.ValueOf(uint16SliceTyp).Pointer() - uint32SliceTypId = reflect.ValueOf(uint32SliceTyp).Pointer() - uint64SliceTypId = reflect.ValueOf(uint64SliceTyp).Pointer() - intSliceTypId = reflect.ValueOf(intSliceTyp).Pointer() - int8SliceTypId = reflect.ValueOf(int8SliceTyp).Pointer() - int16SliceTypId = reflect.ValueOf(int16SliceTyp).Pointer() - int32SliceTypId = reflect.ValueOf(int32SliceTyp).Pointer() - int64SliceTypId = reflect.ValueOf(int64SliceTyp).Pointer() - float32SliceTypId = reflect.ValueOf(float32SliceTyp).Pointer() - float64SliceTypId = 
reflect.ValueOf(float64SliceTyp).Pointer() - - mapStrStrTypId = reflect.ValueOf(mapStrStrTyp).Pointer() - mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer() - mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer() - mapIntIntfTypId = reflect.ValueOf(mapIntIntfTyp).Pointer() - mapInt64IntfTypId = reflect.ValueOf(mapInt64IntfTyp).Pointer() - mapUintIntfTypId = reflect.ValueOf(mapUintIntfTyp).Pointer() - mapUint64IntfTypId = reflect.ValueOf(mapUint64IntfTyp).Pointer() - // Id = reflect.ValueOf().Pointer() - // mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer() - - binaryMarshalerTypId = reflect.ValueOf(binaryMarshalerTyp).Pointer() - binaryUnmarshalerTypId = reflect.ValueOf(binaryUnmarshalerTyp).Pointer() - - intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits()) - uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits()) - - bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} - bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} -) - -type binaryUnmarshaler interface { - UnmarshalBinary(data []byte) error -} - -type binaryMarshaler interface { - MarshalBinary() (data []byte, err error) -} - -// MapBySlice represents a slice which should be encoded as a map in the stream. -// The slice contains a sequence of key-value pairs. -type MapBySlice interface { - MapBySlice() -} - -// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. -// -// BasicHandle encapsulates the common options and extension functions. -type BasicHandle struct { - extHandle - EncodeOptions - DecodeOptions -} - -// Handle is the interface for a specific encoding format. -// -// Typically, a Handle is pre-configured before first time use, -// and not modified while in use. Such a pre-configured Handle -// is safe for concurrent access. -type Handle interface { - writeExt() bool - getBasicHandle() *BasicHandle - newEncDriver(w encWriter) encDriver - newDecDriver(r decReader) decDriver -} - -// RawExt represents raw unprocessed extension data. -type RawExt struct { - Tag byte - Data []byte -} - -type extTypeTagFn struct { - rtid uintptr - rt reflect.Type - tag byte - encFn func(reflect.Value) ([]byte, error) - decFn func(reflect.Value, []byte) error -} - -type extHandle []*extTypeTagFn - -// AddExt registers an encode and decode function for a reflect.Type. -// Note that the type must be a named type, and specifically not -// a pointer or Interface. An error is returned if that is not honored. -// -// To Deregister an ext, call AddExt with 0 tag, nil encfn and nil decfn. -func (o *extHandle) AddExt( - rt reflect.Type, - tag byte, - encfn func(reflect.Value) ([]byte, error), - decfn func(reflect.Value, []byte) error, -) (err error) { - // o is a pointer, because we may need to initialize it - if rt.PkgPath() == "" || rt.Kind() == reflect.Interface { - err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T", - reflect.Zero(rt).Interface()) - return - } - - // o cannot be nil, since it is always embedded in a Handle. - // if nil, let it panic. 
- // if o == nil { - // err = errors.New("codec.Handle.AddExt: extHandle cannot be a nil pointer.") - // return - // } - - rtid := reflect.ValueOf(rt).Pointer() - for _, v := range *o { - if v.rtid == rtid { - v.tag, v.encFn, v.decFn = tag, encfn, decfn - return - } - } - - *o = append(*o, &extTypeTagFn{rtid, rt, tag, encfn, decfn}) - return -} - -func (o extHandle) getExt(rtid uintptr) *extTypeTagFn { - for _, v := range o { - if v.rtid == rtid { - return v - } - } - return nil -} - -func (o extHandle) getExtForTag(tag byte) *extTypeTagFn { - for _, v := range o { - if v.tag == tag { - return v - } - } - return nil -} - -func (o extHandle) getDecodeExtForTag(tag byte) ( - rv reflect.Value, fn func(reflect.Value, []byte) error) { - if x := o.getExtForTag(tag); x != nil { - // ext is only registered for base - rv = reflect.New(x.rt).Elem() - fn = x.decFn - } - return -} - -func (o extHandle) getDecodeExt(rtid uintptr) (tag byte, fn func(reflect.Value, []byte) error) { - if x := o.getExt(rtid); x != nil { - tag = x.tag - fn = x.decFn - } - return -} - -func (o extHandle) getEncodeExt(rtid uintptr) (tag byte, fn func(reflect.Value) ([]byte, error)) { - if x := o.getExt(rtid); x != nil { - tag = x.tag - fn = x.encFn - } - return -} - -type structFieldInfo struct { - encName string // encode name - - // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set. - - is []int // (recursive/embedded) field index in struct - i int16 // field index in struct - omitEmpty bool - toArray bool // if field is _struct, is the toArray set? - - // tag string // tag - // name string // field name - // encNameBs []byte // encoded name as byte stream - // ikind int // kind of the field as an int i.e. int(reflect.Kind) -} - -func parseStructFieldInfo(fname string, stag string) *structFieldInfo { - if fname == "" { - panic("parseStructFieldInfo: No Field Name") - } - si := structFieldInfo{ - // name: fname, - encName: fname, - // tag: stag, - } - - if stag != "" { - for i, s := range strings.Split(stag, ",") { - if i == 0 { - if s != "" { - si.encName = s - } - } else { - switch s { - case "omitempty": - si.omitEmpty = true - case "toarray": - si.toArray = true - } - } - } - } - // si.encNameBs = []byte(si.encName) - return &si -} - -type sfiSortedByEncName []*structFieldInfo - -func (p sfiSortedByEncName) Len() int { - return len(p) -} - -func (p sfiSortedByEncName) Less(i, j int) bool { - return p[i].encName < p[j].encName -} - -func (p sfiSortedByEncName) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} - -// typeInfo keeps information about each type referenced in the encode/decode sequence. -// -// During an encode/decode sequence, we work as below: -// - If base is a built in type, en/decode base value -// - If base is registered as an extension, en/decode base value -// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method -// - Else decode appropriately based on the reflect.Kind -type typeInfo struct { - sfi []*structFieldInfo // sorted. Used when enc/dec struct to map. - sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array. - - rt reflect.Type - rtid uintptr - - // baseId gives pointer to the base reflect.Type, after deferencing - // the pointers. E.g. base type of ***time.Time is time.Time. 
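// (Editor's note, illustrative: for a value of type ***time.Time, base is
// time.Time, baseIndir is 3, and baseId is the type id of time.Time.)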
- base reflect.Type - baseId uintptr - baseIndir int8 // number of indirections to get to base - - mbs bool // base type (T or *T) is a MapBySlice - - m bool // base type (T or *T) is a binaryMarshaler - unm bool // base type (T or *T) is a binaryUnmarshaler - mIndir int8 // number of indirections to get to binaryMarshaler type - unmIndir int8 // number of indirections to get to binaryUnmarshaler type - toArray bool // whether this (struct) type should be encoded as an array -} - -func (ti *typeInfo) indexForEncName(name string) int { - //tisfi := ti.sfi - const binarySearchThreshold = 16 - if sfilen := len(ti.sfi); sfilen < binarySearchThreshold { - // linear search. faster than binary search in my testing up to 16-field structs. - for i, si := range ti.sfi { - if si.encName == name { - return i - } - } - } else { - // binary search. adapted from sort/search.go. - h, i, j := 0, 0, sfilen - for i < j { - h = i + (j-i)/2 - if ti.sfi[h].encName < name { - i = h + 1 - } else { - j = h - } - } - if i < sfilen && ti.sfi[i].encName == name { - return i - } - } - return -1 -} - -func getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { - var ok bool - cachedTypeInfoMutex.RLock() - pti, ok = cachedTypeInfo[rtid] - cachedTypeInfoMutex.RUnlock() - if ok { - return - } - - cachedTypeInfoMutex.Lock() - defer cachedTypeInfoMutex.Unlock() - if pti, ok = cachedTypeInfo[rtid]; ok { - return - } - - ti := typeInfo{rt: rt, rtid: rtid} - pti = &ti - - var indir int8 - if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok { - ti.m, ti.mIndir = true, indir - } - if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok { - ti.unm, ti.unmIndir = true, indir - } - if ok, _ = implementsIntf(rt, mapBySliceTyp); ok { - ti.mbs = true - } - - pt := rt - var ptIndir int8 - // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { } - for pt.Kind() == reflect.Ptr { - pt = pt.Elem() - ptIndir++ - } - if ptIndir == 0 { - ti.base = rt - ti.baseId = rtid - } else { - ti.base = pt - ti.baseId = reflect.ValueOf(pt).Pointer() - ti.baseIndir = ptIndir - } - - if rt.Kind() == reflect.Struct { - var siInfo *structFieldInfo - if f, ok := rt.FieldByName(structInfoFieldName); ok { - siInfo = parseStructFieldInfo(structInfoFieldName, f.Tag.Get(structTagName)) - ti.toArray = siInfo.toArray - } - sfip := make([]*structFieldInfo, 0, rt.NumField()) - rgetTypeInfo(rt, nil, make(map[string]bool), &sfip, siInfo) - - // // try to put all si close together - // const tryToPutAllStructFieldInfoTogether = true - // if tryToPutAllStructFieldInfoTogether { - // sfip2 := make([]structFieldInfo, len(sfip)) - // for i, si := range sfip { - // sfip2[i] = *si - // } - // for i := range sfip { - // sfip[i] = &sfip2[i] - // } - // } - - ti.sfip = make([]*structFieldInfo, len(sfip)) - ti.sfi = make([]*structFieldInfo, len(sfip)) - copy(ti.sfip, sfip) - sort.Sort(sfiSortedByEncName(sfip)) - copy(ti.sfi, sfip) - } - // sfi = sfip - cachedTypeInfo[rtid] = pti - return -} - -func rgetTypeInfo(rt reflect.Type, indexstack []int, fnameToHastag map[string]bool, - sfi *[]*structFieldInfo, siInfo *structFieldInfo, -) { - // for rt.Kind() == reflect.Ptr { - // // indexstack = append(indexstack, 0) - // rt = rt.Elem() - // } - for j := 0; j < rt.NumField(); j++ { - f := rt.Field(j) - stag := f.Tag.Get(structTagName) - if stag == "-" { - continue - } - if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) { - continue - } - // if anonymous and there is no struct tag and its a struct (or pointer to 
struct), inline it. - if f.Anonymous && stag == "" { - ft := f.Type - for ft.Kind() == reflect.Ptr { - ft = ft.Elem() - } - if ft.Kind() == reflect.Struct { - indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) - rgetTypeInfo(ft, indexstack2, fnameToHastag, sfi, siInfo) - continue - } - } - // do not let fields with same name in embedded structs override field at higher level. - // this must be done after anonymous check, to allow anonymous field - // still include their child fields - if _, ok := fnameToHastag[f.Name]; ok { - continue - } - si := parseStructFieldInfo(f.Name, stag) - // si.ikind = int(f.Type.Kind()) - if len(indexstack) == 0 { - si.i = int16(j) - } else { - si.i = -1 - si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) - } - - if siInfo != nil { - if siInfo.omitEmpty { - si.omitEmpty = true - } - } - *sfi = append(*sfi, si) - fnameToHastag[f.Name] = stag != "" - } -} - -func panicToErr(err *error) { - if recoverPanicToErr { - if x := recover(); x != nil { - //debug.PrintStack() - panicValToErr(x, err) - } - } -} - -func doPanic(tag string, format string, params ...interface{}) { - params2 := make([]interface{}, len(params)+1) - params2[0] = tag - copy(params2[1:], params) - panic(fmt.Errorf("%s: "+format, params2...)) -} - -func checkOverflowFloat32(f float64, doCheck bool) { - if !doCheck { - return - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowFloat() - f2 := f - if f2 < 0 { - f2 = -f - } - if math.MaxFloat32 < f2 && f2 <= math.MaxFloat64 { - decErr("Overflow float32 value: %v", f2) - } -} - -func checkOverflow(ui uint64, i int64, bitsize uint8) { - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize == 0 { - return - } - if i != 0 { - if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { - decErr("Overflow int value: %v", i) - } - } - if ui != 0 { - if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { - decErr("Overflow uint value: %v", ui) - } - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/helper_internal.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/helper_internal.go deleted file mode 100644 index 58417da95..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/helper_internal.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -// All non-std package dependencies live in this file, -// so porting to different environment is easy (just update functions). 
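// Editor's note, not part of the original file: the truncation test used by
// checkOverflow at the end of helper.go (just above) shifts left and then
// arithmetically right by (64 - bitsize), keeping only the low bitsize bits,
// sign-extended. A worked example with assumed values:
//
//   // bitsize = 8: i = 300 does not fit in int8
//   trunc := (int64(300) << 56) >> 56 // low byte of 300 is 44, so trunc == 44
//   // 300 != 44 -> overflow is reported
//   // for i = 100: (100 << 56) >> 56 == 100, so the value fits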
- -import ( - "errors" - "fmt" - "math" - "reflect" -) - -var ( - raisePanicAfterRecover = false - debugging = true -) - -func panicValToErr(panicVal interface{}, err *error) { - switch xerr := panicVal.(type) { - case error: - *err = xerr - case string: - *err = errors.New(xerr) - default: - *err = fmt.Errorf("%v", panicVal) - } - if raisePanicAfterRecover { - panic(panicVal) - } - return -} - -func isEmptyValueDeref(v reflect.Value, deref bool) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if deref { - if v.IsNil() { - return true - } - return isEmptyValueDeref(v.Elem(), deref) - } else { - return v.IsNil() - } - case reflect.Struct: - // return true if all fields are empty. else return false. - - // we cannot use equality check, because some fields may be maps/slices/etc - // and consequently the structs are not comparable. - // return v.Interface() == reflect.Zero(v.Type()).Interface() - for i, n := 0, v.NumField(); i < n; i++ { - if !isEmptyValueDeref(v.Field(i), deref) { - return false - } - } - return true - } - return false -} - -func isEmptyValue(v reflect.Value) bool { - return isEmptyValueDeref(v, true) -} - -func debugf(format string, args ...interface{}) { - if debugging { - if len(format) == 0 || format[len(format)-1] != '\n' { - format = format + "\n" - } - fmt.Printf(format, args...) - } -} - -func pruneSignExt(v []byte, pos bool) (n int) { - if len(v) < 2 { - } else if pos && v[0] == 0 { - for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { - } - } else if !pos && v[0] == 0xff { - for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { - } - } - return -} - -func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) { - if typ == nil { - return - } - rt := typ - // The type might be a pointer and we need to keep - // dereferencing to the base type until we find an implementation. - for { - if rt.Implements(iTyp) { - return true, indir - } - if p := rt; p.Kind() == reflect.Ptr { - indir++ - if indir >= math.MaxInt8 { // insane number of indirections - return false, 0 - } - rt = p.Elem() - continue - } - break - } - // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy. - if typ.Kind() != reflect.Ptr { - // Not a pointer, but does the pointer work? - if reflect.PtrTo(typ).Implements(iTyp) { - return true, -1 - } - } - return false, 0 -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/msgpack.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/msgpack.go deleted file mode 100644 index da0500d19..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/msgpack.go +++ /dev/null @@ -1,816 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -/* -MSGPACK - -Msgpack-c implementation powers the c, c++, python, ruby, etc libraries. -We need to maintain compatibility with it and how it encodes integer values -without caring about the type. 
- -For compatibility with behaviour of msgpack-c reference implementation: - - Go intX (>0) and uintX - IS ENCODED AS - msgpack +ve fixnum, unsigned - - Go intX (<0) - IS ENCODED AS - msgpack -ve fixnum, signed - -*/ -package codec - -import ( - "fmt" - "io" - "math" - "net/rpc" -) - -const ( - mpPosFixNumMin byte = 0x00 - mpPosFixNumMax = 0x7f - mpFixMapMin = 0x80 - mpFixMapMax = 0x8f - mpFixArrayMin = 0x90 - mpFixArrayMax = 0x9f - mpFixStrMin = 0xa0 - mpFixStrMax = 0xbf - mpNil = 0xc0 - _ = 0xc1 - mpFalse = 0xc2 - mpTrue = 0xc3 - mpFloat = 0xca - mpDouble = 0xcb - mpUint8 = 0xcc - mpUint16 = 0xcd - mpUint32 = 0xce - mpUint64 = 0xcf - mpInt8 = 0xd0 - mpInt16 = 0xd1 - mpInt32 = 0xd2 - mpInt64 = 0xd3 - - // extensions below - mpBin8 = 0xc4 - mpBin16 = 0xc5 - mpBin32 = 0xc6 - mpExt8 = 0xc7 - mpExt16 = 0xc8 - mpExt32 = 0xc9 - mpFixExt1 = 0xd4 - mpFixExt2 = 0xd5 - mpFixExt4 = 0xd6 - mpFixExt8 = 0xd7 - mpFixExt16 = 0xd8 - - mpStr8 = 0xd9 // new - mpStr16 = 0xda - mpStr32 = 0xdb - - mpArray16 = 0xdc - mpArray32 = 0xdd - - mpMap16 = 0xde - mpMap32 = 0xdf - - mpNegFixNumMin = 0xe0 - mpNegFixNumMax = 0xff -) - -// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec -// that the backend RPC service takes multiple arguments, which have been arranged -// in sequence in the slice. -// -// The Codec then passes it AS-IS to the rpc service (without wrapping it in an -// array of 1 element). -type MsgpackSpecRpcMultiArgs []interface{} - -// A MsgpackContainer type specifies the different types of msgpackContainers. -type msgpackContainerType struct { - fixCutoff int - bFixMin, b8, b16, b32 byte - hasFixMin, has8, has8Always bool -} - -var ( - msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false} - msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true} - msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false} - msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false} -) - -//--------------------------------------------- - -type msgpackEncDriver struct { - w encWriter - h *MsgpackHandle -} - -func (e *msgpackEncDriver) isBuiltinType(rt uintptr) bool { - //no builtin types. All encodings are based on kinds. Types supported as extensions. 
- return false -} - -func (e *msgpackEncDriver) encodeBuiltin(rt uintptr, v interface{}) {} - -func (e *msgpackEncDriver) encodeNil() { - e.w.writen1(mpNil) -} - -func (e *msgpackEncDriver) encodeInt(i int64) { - - switch { - case i >= 0: - e.encodeUint(uint64(i)) - case i >= -32: - e.w.writen1(byte(i)) - case i >= math.MinInt8: - e.w.writen2(mpInt8, byte(i)) - case i >= math.MinInt16: - e.w.writen1(mpInt16) - e.w.writeUint16(uint16(i)) - case i >= math.MinInt32: - e.w.writen1(mpInt32) - e.w.writeUint32(uint32(i)) - default: - e.w.writen1(mpInt64) - e.w.writeUint64(uint64(i)) - } -} - -func (e *msgpackEncDriver) encodeUint(i uint64) { - switch { - case i <= math.MaxInt8: - e.w.writen1(byte(i)) - case i <= math.MaxUint8: - e.w.writen2(mpUint8, byte(i)) - case i <= math.MaxUint16: - e.w.writen1(mpUint16) - e.w.writeUint16(uint16(i)) - case i <= math.MaxUint32: - e.w.writen1(mpUint32) - e.w.writeUint32(uint32(i)) - default: - e.w.writen1(mpUint64) - e.w.writeUint64(uint64(i)) - } -} - -func (e *msgpackEncDriver) encodeBool(b bool) { - if b { - e.w.writen1(mpTrue) - } else { - e.w.writen1(mpFalse) - } -} - -func (e *msgpackEncDriver) encodeFloat32(f float32) { - e.w.writen1(mpFloat) - e.w.writeUint32(math.Float32bits(f)) -} - -func (e *msgpackEncDriver) encodeFloat64(f float64) { - e.w.writen1(mpDouble) - e.w.writeUint64(math.Float64bits(f)) -} - -func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { - switch { - case l == 1: - e.w.writen2(mpFixExt1, xtag) - case l == 2: - e.w.writen2(mpFixExt2, xtag) - case l == 4: - e.w.writen2(mpFixExt4, xtag) - case l == 8: - e.w.writen2(mpFixExt8, xtag) - case l == 16: - e.w.writen2(mpFixExt16, xtag) - case l < 256: - e.w.writen2(mpExt8, byte(l)) - e.w.writen1(xtag) - case l < 65536: - e.w.writen1(mpExt16) - e.w.writeUint16(uint16(l)) - e.w.writen1(xtag) - default: - e.w.writen1(mpExt32) - e.w.writeUint32(uint32(l)) - e.w.writen1(xtag) - } -} - -func (e *msgpackEncDriver) encodeArrayPreamble(length int) { - e.writeContainerLen(msgpackContainerList, length) -} - -func (e *msgpackEncDriver) encodeMapPreamble(length int) { - e.writeContainerLen(msgpackContainerMap, length) -} - -func (e *msgpackEncDriver) encodeString(c charEncoding, s string) { - if c == c_RAW && e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, len(s)) - } else { - e.writeContainerLen(msgpackContainerStr, len(s)) - } - if len(s) > 0 { - e.w.writestr(s) - } -} - -func (e *msgpackEncDriver) encodeSymbol(v string) { - e.encodeString(c_UTF8, v) -} - -func (e *msgpackEncDriver) encodeStringBytes(c charEncoding, bs []byte) { - if c == c_RAW && e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, len(bs)) - } else { - e.writeContainerLen(msgpackContainerStr, len(bs)) - } - if len(bs) > 0 { - e.w.writeb(bs) - } -} - -func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { - switch { - case ct.hasFixMin && l < ct.fixCutoff: - e.w.writen1(ct.bFixMin | byte(l)) - case ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt): - e.w.writen2(ct.b8, uint8(l)) - case l < 65536: - e.w.writen1(ct.b16) - e.w.writeUint16(uint16(l)) - default: - e.w.writen1(ct.b32) - e.w.writeUint32(uint32(l)) - } -} - -//--------------------------------------------- - -type msgpackDecDriver struct { - r decReader - h *MsgpackHandle - bd byte - bdRead bool - bdType valueType -} - -func (d *msgpackDecDriver) isBuiltinType(rt uintptr) bool { - //no builtin types. All encodings are based on kinds. Types supported as extensions. 
- return false -} - -func (d *msgpackDecDriver) decodeBuiltin(rt uintptr, v interface{}) {} - -// Note: This returns either a primitive (int, bool, etc) for non-containers, -// or a containerType, or a specific type denoting nil or extension. -// It is called when a nil interface{} is passed, leaving it up to the DecDriver -// to introspect the stream and decide how best to decode. -// It deciphers the value by looking at the stream first. -func (d *msgpackDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { - d.initReadNext() - bd := d.bd - - switch bd { - case mpNil: - vt = valueTypeNil - d.bdRead = false - case mpFalse: - vt = valueTypeBool - v = false - case mpTrue: - vt = valueTypeBool - v = true - - case mpFloat: - vt = valueTypeFloat - v = float64(math.Float32frombits(d.r.readUint32())) - case mpDouble: - vt = valueTypeFloat - v = math.Float64frombits(d.r.readUint64()) - - case mpUint8: - vt = valueTypeUint - v = uint64(d.r.readn1()) - case mpUint16: - vt = valueTypeUint - v = uint64(d.r.readUint16()) - case mpUint32: - vt = valueTypeUint - v = uint64(d.r.readUint32()) - case mpUint64: - vt = valueTypeUint - v = uint64(d.r.readUint64()) - - case mpInt8: - vt = valueTypeInt - v = int64(int8(d.r.readn1())) - case mpInt16: - vt = valueTypeInt - v = int64(int16(d.r.readUint16())) - case mpInt32: - vt = valueTypeInt - v = int64(int32(d.r.readUint32())) - case mpInt64: - vt = valueTypeInt - v = int64(int64(d.r.readUint64())) - - default: - switch { - case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: - // positive fixnum (always signed) - vt = valueTypeInt - v = int64(int8(bd)) - case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: - // negative fixnum - vt = valueTypeInt - v = int64(int8(bd)) - case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: - if d.h.RawToString { - var rvm string - vt = valueTypeString - v = &rvm - } else { - var rvm = []byte{} - vt = valueTypeBytes - v = &rvm - } - decodeFurther = true - case bd == mpBin8, bd == mpBin16, bd == mpBin32: - var rvm = []byte{} - vt = valueTypeBytes - v = &rvm - decodeFurther = true - case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: - vt = valueTypeArray - decodeFurther = true - case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: - vt = valueTypeMap - decodeFurther = true - case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: - clen := d.readExtLen() - var re RawExt - re.Tag = d.r.readn1() - re.Data = d.r.readn(clen) - v = &re - vt = valueTypeExt - default: - decErr("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) - } - } - if !decodeFurther { - d.bdRead = false - } - return -} - -// int can be decoded from msgpack type: intXXX or uintXXX -func (d *msgpackDecDriver) decodeInt(bitsize uint8) (i int64) { - switch d.bd { - case mpUint8: - i = int64(uint64(d.r.readn1())) - case mpUint16: - i = int64(uint64(d.r.readUint16())) - case mpUint32: - i = int64(uint64(d.r.readUint32())) - case mpUint64: - i = int64(d.r.readUint64()) - case mpInt8: - i = int64(int8(d.r.readn1())) - case mpInt16: - i = int64(int16(d.r.readUint16())) - case mpInt32: - i = int64(int32(d.r.readUint32())) - case mpInt64: - i = int64(d.r.readUint64()) - default: - switch { - case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: - i = int64(int8(d.bd)) - case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: - i = int64(int8(d.bd)) - default: - decErr("Unhandled single-byte unsigned integer value: %s: %x", 
msgBadDesc, d.bd) - } - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize > 0 { - if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { - decErr("Overflow int value: %v", i) - } - } - d.bdRead = false - return -} - -// uint can be decoded from msgpack type: intXXX or uintXXX -func (d *msgpackDecDriver) decodeUint(bitsize uint8) (ui uint64) { - switch d.bd { - case mpUint8: - ui = uint64(d.r.readn1()) - case mpUint16: - ui = uint64(d.r.readUint16()) - case mpUint32: - ui = uint64(d.r.readUint32()) - case mpUint64: - ui = d.r.readUint64() - case mpInt8: - if i := int64(int8(d.r.readn1())); i >= 0 { - ui = uint64(i) - } else { - decErr("Assigning negative signed value: %v, to unsigned type", i) - } - case mpInt16: - if i := int64(int16(d.r.readUint16())); i >= 0 { - ui = uint64(i) - } else { - decErr("Assigning negative signed value: %v, to unsigned type", i) - } - case mpInt32: - if i := int64(int32(d.r.readUint32())); i >= 0 { - ui = uint64(i) - } else { - decErr("Assigning negative signed value: %v, to unsigned type", i) - } - case mpInt64: - if i := int64(d.r.readUint64()); i >= 0 { - ui = uint64(i) - } else { - decErr("Assigning negative signed value: %v, to unsigned type", i) - } - default: - switch { - case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: - ui = uint64(d.bd) - case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: - decErr("Assigning negative signed value: %v, to unsigned type", int(d.bd)) - default: - decErr("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) - } - } - // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() - if bitsize > 0 { - if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { - decErr("Overflow uint value: %v", ui) - } - } - d.bdRead = false - return -} - -// float can either be decoded from msgpack type: float, double or intX -func (d *msgpackDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { - switch d.bd { - case mpFloat: - f = float64(math.Float32frombits(d.r.readUint32())) - case mpDouble: - f = math.Float64frombits(d.r.readUint64()) - default: - f = float64(d.decodeInt(0)) - } - checkOverflowFloat32(f, chkOverflow32) - d.bdRead = false - return -} - -// bool can be decoded from bool, fixnum 0 or 1. -func (d *msgpackDecDriver) decodeBool() (b bool) { - switch d.bd { - case mpFalse, 0: - // b = false - case mpTrue, 1: - b = true - default: - decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - } - d.bdRead = false - return -} - -func (d *msgpackDecDriver) decodeString() (s string) { - clen := d.readContainerLen(msgpackContainerStr) - if clen > 0 { - s = string(d.r.readn(clen)) - } - d.bdRead = false - return -} - -// Callers must check if changed=true (to decide whether to replace the one they have) -func (d *msgpackDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { - // bytes can be decoded from msgpackContainerStr or msgpackContainerBin - var clen int - switch d.bd { - case mpBin8, mpBin16, mpBin32: - clen = d.readContainerLen(msgpackContainerBin) - default: - clen = d.readContainerLen(msgpackContainerStr) - } - // if clen < 0 { - // changed = true - // panic("length cannot be zero. 
this cannot be nil.") - // } - if clen > 0 { - // if no contents in stream, don't update the passed byteslice - if len(bs) != clen { - // Return changed=true if length of passed slice diff from length of bytes in stream - if len(bs) > clen { - bs = bs[:clen] - } else { - bs = make([]byte, clen) - } - bsOut = bs - changed = true - } - d.r.readb(bs) - } - d.bdRead = false - return -} - -// Every top-level decode funcs (i.e. decodeValue, decode) must call this first. -func (d *msgpackDecDriver) initReadNext() { - if d.bdRead { - return - } - d.bd = d.r.readn1() - d.bdRead = true - d.bdType = valueTypeUnset -} - -func (d *msgpackDecDriver) currentEncodedType() valueType { - if d.bdType == valueTypeUnset { - bd := d.bd - switch bd { - case mpNil: - d.bdType = valueTypeNil - case mpFalse, mpTrue: - d.bdType = valueTypeBool - case mpFloat, mpDouble: - d.bdType = valueTypeFloat - case mpUint8, mpUint16, mpUint32, mpUint64: - d.bdType = valueTypeUint - case mpInt8, mpInt16, mpInt32, mpInt64: - d.bdType = valueTypeInt - default: - switch { - case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: - d.bdType = valueTypeInt - case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: - d.bdType = valueTypeInt - case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: - if d.h.RawToString { - d.bdType = valueTypeString - } else { - d.bdType = valueTypeBytes - } - case bd == mpBin8, bd == mpBin16, bd == mpBin32: - d.bdType = valueTypeBytes - case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: - d.bdType = valueTypeArray - case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: - d.bdType = valueTypeMap - case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: - d.bdType = valueTypeExt - default: - decErr("currentEncodedType: Undeciphered descriptor: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) - } - } - } - return d.bdType -} - -func (d *msgpackDecDriver) tryDecodeAsNil() bool { - if d.bd == mpNil { - d.bdRead = false - return true - } - return false -} - -func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { - bd := d.bd - switch { - case bd == mpNil: - clen = -1 // to represent nil - case bd == ct.b8: - clen = int(d.r.readn1()) - case bd == ct.b16: - clen = int(d.r.readUint16()) - case bd == ct.b32: - clen = int(d.r.readUint32()) - case (ct.bFixMin & bd) == ct.bFixMin: - clen = int(ct.bFixMin ^ bd) - default: - decErr("readContainerLen: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) - } - d.bdRead = false - return -} - -func (d *msgpackDecDriver) readMapLen() int { - return d.readContainerLen(msgpackContainerMap) -} - -func (d *msgpackDecDriver) readArrayLen() int { - return d.readContainerLen(msgpackContainerList) -} - -func (d *msgpackDecDriver) readExtLen() (clen int) { - switch d.bd { - case mpNil: - clen = -1 // to represent nil - case mpFixExt1: - clen = 1 - case mpFixExt2: - clen = 2 - case mpFixExt4: - clen = 4 - case mpFixExt8: - clen = 8 - case mpFixExt16: - clen = 16 - case mpExt8: - clen = int(d.r.readn1()) - case mpExt16: - clen = int(d.r.readUint16()) - case mpExt32: - clen = int(d.r.readUint32()) - default: - decErr("decoding ext bytes: found unexpected byte: %x", d.bd) - } - return -} - -func (d *msgpackDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - xbd := d.bd - switch { - case xbd == mpBin8, xbd == mpBin16, xbd == mpBin32: - xbs, _ = d.decodeBytes(nil) - case xbd == mpStr8, xbd == mpStr16, xbd == mpStr32, - xbd >= mpFixStrMin && xbd <= mpFixStrMax: - 
xbs = []byte(d.decodeString()) - default: - clen := d.readExtLen() - xtag = d.r.readn1() - if verifyTag && xtag != tag { - decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) - } - xbs = d.r.readn(clen) - } - d.bdRead = false - return -} - -//-------------------------------------------------- - -//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format. -type MsgpackHandle struct { - BasicHandle - - // RawToString controls how raw bytes are decoded into a nil interface{}. - RawToString bool - // WriteExt flag supports encoding configured extensions with extension tags. - // It also controls whether other elements of the new spec are encoded (ie Str8). - // - // With WriteExt=false, configured extensions are serialized as raw bytes - // and Str8 is not encoded. - // - // A stream can still be decoded into a typed value, provided an appropriate value - // is provided, but the type cannot be inferred from the stream. If no appropriate - // type is provided (e.g. decoding into a nil interface{}), you get back - // a []byte or string based on the setting of RawToString. - WriteExt bool -} - -func (h *MsgpackHandle) newEncDriver(w encWriter) encDriver { - return &msgpackEncDriver{w: w, h: h} -} - -func (h *MsgpackHandle) newDecDriver(r decReader) decDriver { - return &msgpackDecDriver{r: r, h: h} -} - -func (h *MsgpackHandle) writeExt() bool { - return h.WriteExt -} - -func (h *MsgpackHandle) getBasicHandle() *BasicHandle { - return &h.BasicHandle -} - -//-------------------------------------------------- - -type msgpackSpecRpcCodec struct { - rpcCodec -} - -// /////////////// Spec RPC Codec /////////////////// -func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { - // WriteRequest can write to both a Go service, and other services that do - // not abide by the 1 argument rule of a Go service. - // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs - var bodyArr []interface{} - if m, ok := body.(MsgpackSpecRpcMultiArgs); ok { - bodyArr = ([]interface{})(m) - } else { - bodyArr = []interface{}{body} - } - r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} - return c.write(r2, nil, false, true) -} - -func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { - var moe interface{} - if r.Error != "" { - moe = r.Error - } - if moe != nil && body != nil { - body = nil - } - r2 := []interface{}{1, uint32(r.Seq), moe, body} - return c.write(r2, nil, false, true) -} - -func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { - return c.parseCustomHeader(1, &r.Seq, &r.Error) -} - -func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error { - return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod) -} - -func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { - if body == nil { // read and discard - return c.read(nil) - } - bodyArr := []interface{}{body} - return c.read(&bodyArr) -} - -func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { - - if c.cls { - return io.EOF - } - - // We read the response header by hand - // so that the body can be decoded on its own from the stream at a later time. - - const fia byte = 0x94 //four item array descriptor value - // Not sure why the panic of EOF is swallowed above. - // if bs1 := c.dec.r.readn1(); bs1 != fia { - // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. 
Received %v", fia, bs1) - // return - // } - var b byte - b, err = c.br.ReadByte() - if err != nil { - return - } - if b != fia { - err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b) - return - } - - if err = c.read(&b); err != nil { - return - } - if b != expectTypeByte { - err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b) - return - } - if err = c.read(msgid); err != nil { - return - } - if err = c.read(methodOrError); err != nil { - return - } - return -} - -//-------------------------------------------------- - -// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol -// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md -type msgpackSpecRpc struct{} - -// MsgpackSpecRpc implements Rpc using the communication protocol defined in -// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . -// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. -var MsgpackSpecRpc msgpackSpecRpc - -func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { - return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} -} - -func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { - return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} -} - -var _ decDriver = (*msgpackDecDriver)(nil) -var _ encDriver = (*msgpackEncDriver)(nil) diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/msgpack_test.py b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/msgpack_test.py deleted file mode 100644 index e933838c5..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/msgpack_test.py +++ /dev/null @@ -1,110 +0,0 @@ -#!/usr/bin/env python - -# This will create golden files in a directory passed to it. -# A Test calls this internally to create the golden files -# So it can process them (so we don't have to checkin the files). 
- -import msgpack, msgpackrpc, sys, os, threading - -def get_test_data_list(): - # get list with all primitive types, and a combo type - l0 = [ - -8, - -1616, - -32323232, - -6464646464646464, - 192, - 1616, - 32323232, - 6464646464646464, - 192, - -3232.0, - -6464646464.0, - 3232.0, - 6464646464.0, - False, - True, - None, - "someday", - "", - "bytestring", - 1328176922000002000, - -2206187877999998000, - 0, - -6795364578871345152 - ] - l1 = [ - { "true": True, - "false": False }, - { "true": "True", - "false": False, - "uint16(1616)": 1616 }, - { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ], - "int32":32323232, "bool": True, - "LONG STRING": "123456789012345678901234567890123456789012345678901234567890", - "SHORT STRING": "1234567890" }, - { True: "true", 8: False, "false": 0 } - ] - - l = [] - l.extend(l0) - l.append(l0) - l.extend(l1) - return l - -def build_test_data(destdir): - l = get_test_data_list() - for i in range(len(l)): - packer = msgpack.Packer() - serialized = packer.pack(l[i]) - f = open(os.path.join(destdir, str(i) + '.golden'), 'wb') - f.write(serialized) - f.close() - -def doRpcServer(port, stopTimeSec): - class EchoHandler(object): - def Echo123(self, msg1, msg2, msg3): - return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3)) - def EchoStruct(self, msg): - return ("%s" % msg) - - addr = msgpackrpc.Address('localhost', port) - server = msgpackrpc.Server(EchoHandler()) - server.listen(addr) - # run thread to stop it after stopTimeSec seconds if > 0 - if stopTimeSec > 0: - def myStopRpcServer(): - server.stop() - t = threading.Timer(stopTimeSec, myStopRpcServer) - t.start() - server.start() - -def doRpcClientToPythonSvc(port): - address = msgpackrpc.Address('localhost', port) - client = msgpackrpc.Client(address, unpack_encoding='utf-8') - print client.call("Echo123", "A1", "B2", "C3") - print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) - -def doRpcClientToGoSvc(port): - # print ">>>> port: ", port, " <<<<<" - address = msgpackrpc.Address('localhost', port) - client = msgpackrpc.Client(address, unpack_encoding='utf-8') - print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]) - print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) - -def doMain(args): - if len(args) == 2 and args[0] == "testdata": - build_test_data(args[1]) - elif len(args) == 3 and args[0] == "rpc-server": - doRpcServer(int(args[1]), int(args[2])) - elif len(args) == 2 and args[0] == "rpc-client-python-service": - doRpcClientToPythonSvc(int(args[1])) - elif len(args) == 2 and args[0] == "rpc-client-go-service": - doRpcClientToGoSvc(int(args[1])) - else: - print("Usage: msgpack_test.py " + - "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...") - -if __name__ == "__main__": - doMain(sys.argv[1:]) - diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/rpc.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/rpc.go deleted file mode 100644 index d014dbdcc..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/rpc.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -import ( - "bufio" - "io" - "net/rpc" - "sync" -) - -// Rpc provides a rpc Server or Client Codec for rpc communication. 
-type Rpc interface { - ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec - ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec -} - -// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer -// used by the rpc connection. It accomodates use-cases where the connection -// should be used by rpc and non-rpc functions, e.g. streaming a file after -// sending an rpc response. -type RpcCodecBuffered interface { - BufferedReader() *bufio.Reader - BufferedWriter() *bufio.Writer -} - -// ------------------------------------- - -// rpcCodec defines the struct members and common methods. -type rpcCodec struct { - rwc io.ReadWriteCloser - dec *Decoder - enc *Encoder - bw *bufio.Writer - br *bufio.Reader - mu sync.Mutex - cls bool -} - -func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec { - bw := bufio.NewWriter(conn) - br := bufio.NewReader(conn) - return rpcCodec{ - rwc: conn, - bw: bw, - br: br, - enc: NewEncoder(bw, h), - dec: NewDecoder(br, h), - } -} - -func (c *rpcCodec) BufferedReader() *bufio.Reader { - return c.br -} - -func (c *rpcCodec) BufferedWriter() *bufio.Writer { - return c.bw -} - -func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) { - if c.cls { - return io.EOF - } - if err = c.enc.Encode(obj1); err != nil { - return - } - if writeObj2 { - if err = c.enc.Encode(obj2); err != nil { - return - } - } - if doFlush && c.bw != nil { - return c.bw.Flush() - } - return -} - -func (c *rpcCodec) read(obj interface{}) (err error) { - if c.cls { - return io.EOF - } - //If nil is passed in, we should still attempt to read content to nowhere. - if obj == nil { - var obj2 interface{} - return c.dec.Decode(&obj2) - } - return c.dec.Decode(obj) -} - -func (c *rpcCodec) Close() error { - if c.cls { - return io.EOF - } - c.cls = true - return c.rwc.Close() -} - -func (c *rpcCodec) ReadResponseBody(body interface{}) error { - return c.read(body) -} - -// ------------------------------------- - -type goRpcCodec struct { - rpcCodec -} - -func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { - // Must protect for concurrent access as per API - c.mu.Lock() - defer c.mu.Unlock() - return c.write(r, body, true, true) -} - -func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { - c.mu.Lock() - defer c.mu.Unlock() - return c.write(r, body, true, true) -} - -func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error { - return c.read(r) -} - -func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error { - return c.read(r) -} - -func (c *goRpcCodec) ReadRequestBody(body interface{}) error { - return c.read(body) -} - -// ------------------------------------- - -// goRpc is the implementation of Rpc that uses the communication protocol -// as defined in net/rpc package. -type goRpc struct{} - -// GoRpc implements Rpc using the communication protocol defined in net/rpc package. -// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. 
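[Editor's aside] For contrast with MsgpackSpecRpc, `GoRpc` (declared just below) keeps net/rpc's own request/response framing and only swaps the encoding for msgpack via the shared buffered rpcCodec above — so only Go peers can talk to it. A hedged server-side sketch; `EchoService`, its method, and the listen address are illustrative assumptions:

```go
package main

import (
	"log"
	"net"
	"net/rpc"

	"github.com/hashicorp/go-msgpack/codec"
)

// EchoService is a stand-in net/rpc service for illustration.
type EchoService struct{}

func (EchoService) Echo(in string, out *string) error {
	*out = in
	return nil
}

func main() {
	if err := rpc.Register(EchoService{}); err != nil {
		log.Fatal(err)
	}
	ln, err := net.Listen("tcp", ":5555") // assumed address
	if err != nil {
		log.Fatal(err)
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		// One codec per connection; headers and bodies are msgpack-encoded,
		// but the framing is net/rpc's native protocol.
		go rpc.ServeCodec(codec.GoRpc.ServerCodec(conn, &codec.MsgpackHandle{}))
	}
}
```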
-var GoRpc goRpc - -func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { - return &goRpcCodec{newRPCCodec(conn, h)} -} - -func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { - return &goRpcCodec{newRPCCodec(conn, h)} -} - -var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/simple.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/simple.go deleted file mode 100644 index 9e4d148a2..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/simple.go +++ /dev/null @@ -1,461 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -import "math" - -const ( - _ uint8 = iota - simpleVdNil = 1 - simpleVdFalse = 2 - simpleVdTrue = 3 - simpleVdFloat32 = 4 - simpleVdFloat64 = 5 - - // each lasts for 4 (ie n, n+1, n+2, n+3) - simpleVdPosInt = 8 - simpleVdNegInt = 12 - - // containers: each lasts for 4 (ie n, n+1, n+2, ... n+7) - simpleVdString = 216 - simpleVdByteArray = 224 - simpleVdArray = 232 - simpleVdMap = 240 - simpleVdExt = 248 -) - -type simpleEncDriver struct { - h *SimpleHandle - w encWriter - //b [8]byte -} - -func (e *simpleEncDriver) isBuiltinType(rt uintptr) bool { - return false -} - -func (e *simpleEncDriver) encodeBuiltin(rt uintptr, v interface{}) { -} - -func (e *simpleEncDriver) encodeNil() { - e.w.writen1(simpleVdNil) -} - -func (e *simpleEncDriver) encodeBool(b bool) { - if b { - e.w.writen1(simpleVdTrue) - } else { - e.w.writen1(simpleVdFalse) - } -} - -func (e *simpleEncDriver) encodeFloat32(f float32) { - e.w.writen1(simpleVdFloat32) - e.w.writeUint32(math.Float32bits(f)) -} - -func (e *simpleEncDriver) encodeFloat64(f float64) { - e.w.writen1(simpleVdFloat64) - e.w.writeUint64(math.Float64bits(f)) -} - -func (e *simpleEncDriver) encodeInt(v int64) { - if v < 0 { - e.encUint(uint64(-v), simpleVdNegInt) - } else { - e.encUint(uint64(v), simpleVdPosInt) - } -} - -func (e *simpleEncDriver) encodeUint(v uint64) { - e.encUint(v, simpleVdPosInt) -} - -func (e *simpleEncDriver) encUint(v uint64, bd uint8) { - switch { - case v <= math.MaxUint8: - e.w.writen2(bd, uint8(v)) - case v <= math.MaxUint16: - e.w.writen1(bd + 1) - e.w.writeUint16(uint16(v)) - case v <= math.MaxUint32: - e.w.writen1(bd + 2) - e.w.writeUint32(uint32(v)) - case v <= math.MaxUint64: - e.w.writen1(bd + 3) - e.w.writeUint64(v) - } -} - -func (e *simpleEncDriver) encLen(bd byte, length int) { - switch { - case length == 0: - e.w.writen1(bd) - case length <= math.MaxUint8: - e.w.writen1(bd + 1) - e.w.writen1(uint8(length)) - case length <= math.MaxUint16: - e.w.writen1(bd + 2) - e.w.writeUint16(uint16(length)) - case int64(length) <= math.MaxUint32: - e.w.writen1(bd + 3) - e.w.writeUint32(uint32(length)) - default: - e.w.writen1(bd + 4) - e.w.writeUint64(uint64(length)) - } -} - -func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) { - e.encLen(simpleVdExt, length) - e.w.writen1(xtag) -} - -func (e *simpleEncDriver) encodeArrayPreamble(length int) { - e.encLen(simpleVdArray, length) -} - -func (e *simpleEncDriver) encodeMapPreamble(length int) { - e.encLen(simpleVdMap, length) -} - -func (e *simpleEncDriver) encodeString(c charEncoding, v string) { - e.encLen(simpleVdString, len(v)) - e.w.writestr(v) -} - -func (e *simpleEncDriver) encodeSymbol(v string) { - e.encodeString(c_UTF8, v) 
-} - -func (e *simpleEncDriver) encodeStringBytes(c charEncoding, v []byte) { - e.encLen(simpleVdByteArray, len(v)) - e.w.writeb(v) -} - -//------------------------------------ - -type simpleDecDriver struct { - h *SimpleHandle - r decReader - bdRead bool - bdType valueType - bd byte - //b [8]byte -} - -func (d *simpleDecDriver) initReadNext() { - if d.bdRead { - return - } - d.bd = d.r.readn1() - d.bdRead = true - d.bdType = valueTypeUnset -} - -func (d *simpleDecDriver) currentEncodedType() valueType { - if d.bdType == valueTypeUnset { - switch d.bd { - case simpleVdNil: - d.bdType = valueTypeNil - case simpleVdTrue, simpleVdFalse: - d.bdType = valueTypeBool - case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: - d.bdType = valueTypeUint - case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: - d.bdType = valueTypeInt - case simpleVdFloat32, simpleVdFloat64: - d.bdType = valueTypeFloat - case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: - d.bdType = valueTypeString - case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - d.bdType = valueTypeBytes - case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: - d.bdType = valueTypeExt - case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: - d.bdType = valueTypeArray - case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: - d.bdType = valueTypeMap - default: - decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.bd) - } - } - return d.bdType -} - -func (d *simpleDecDriver) tryDecodeAsNil() bool { - if d.bd == simpleVdNil { - d.bdRead = false - return true - } - return false -} - -func (d *simpleDecDriver) isBuiltinType(rt uintptr) bool { - return false -} - -func (d *simpleDecDriver) decodeBuiltin(rt uintptr, v interface{}) { -} - -func (d *simpleDecDriver) decIntAny() (ui uint64, i int64, neg bool) { - switch d.bd { - case simpleVdPosInt: - ui = uint64(d.r.readn1()) - i = int64(ui) - case simpleVdPosInt + 1: - ui = uint64(d.r.readUint16()) - i = int64(ui) - case simpleVdPosInt + 2: - ui = uint64(d.r.readUint32()) - i = int64(ui) - case simpleVdPosInt + 3: - ui = uint64(d.r.readUint64()) - i = int64(ui) - case simpleVdNegInt: - ui = uint64(d.r.readn1()) - i = -(int64(ui)) - neg = true - case simpleVdNegInt + 1: - ui = uint64(d.r.readUint16()) - i = -(int64(ui)) - neg = true - case simpleVdNegInt + 2: - ui = uint64(d.r.readUint32()) - i = -(int64(ui)) - neg = true - case simpleVdNegInt + 3: - ui = uint64(d.r.readUint64()) - i = -(int64(ui)) - neg = true - default: - decErr("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd) - } - // don't do this check, because callers may only want the unsigned value. 
- // if ui > math.MaxInt64 { - // decErr("decIntAny: Integer out of range for signed int64: %v", ui) - // } - return -} - -func (d *simpleDecDriver) decodeInt(bitsize uint8) (i int64) { - _, i, _ = d.decIntAny() - checkOverflow(0, i, bitsize) - d.bdRead = false - return -} - -func (d *simpleDecDriver) decodeUint(bitsize uint8) (ui uint64) { - ui, i, neg := d.decIntAny() - if neg { - decErr("Assigning negative signed value: %v, to unsigned type", i) - } - checkOverflow(ui, 0, bitsize) - d.bdRead = false - return -} - -func (d *simpleDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { - switch d.bd { - case simpleVdFloat32: - f = float64(math.Float32frombits(d.r.readUint32())) - case simpleVdFloat64: - f = math.Float64frombits(d.r.readUint64()) - default: - if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 { - _, i, _ := d.decIntAny() - f = float64(i) - } else { - decErr("Float only valid from float32/64: Invalid descriptor: %v", d.bd) - } - } - checkOverflowFloat32(f, chkOverflow32) - d.bdRead = false - return -} - -// bool can be decoded from bool only (single byte). -func (d *simpleDecDriver) decodeBool() (b bool) { - switch d.bd { - case simpleVdTrue: - b = true - case simpleVdFalse: - default: - decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) readMapLen() (length int) { - d.bdRead = false - return d.decLen() -} - -func (d *simpleDecDriver) readArrayLen() (length int) { - d.bdRead = false - return d.decLen() -} - -func (d *simpleDecDriver) decLen() int { - switch d.bd % 8 { - case 0: - return 0 - case 1: - return int(d.r.readn1()) - case 2: - return int(d.r.readUint16()) - case 3: - ui := uint64(d.r.readUint32()) - checkOverflow(ui, 0, intBitsize) - return int(ui) - case 4: - ui := d.r.readUint64() - checkOverflow(ui, 0, intBitsize) - return int(ui) - } - decErr("decLen: Cannot read length: bd%8 must be in range 0..4. Got: %d", d.bd%8) - return -1 -} - -func (d *simpleDecDriver) decodeString() (s string) { - s = string(d.r.readn(d.decLen())) - d.bdRead = false - return -} - -func (d *simpleDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { - if clen := d.decLen(); clen > 0 { - // if no contents in stream, don't update the passed byteslice - if len(bs) != clen { - if len(bs) > clen { - bs = bs[:clen] - } else { - bs = make([]byte, clen) - } - bsOut = bs - changed = true - } - d.r.readb(bs) - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { - switch d.bd { - case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: - l := d.decLen() - xtag = d.r.readn1() - if verifyTag && xtag != tag { - decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) - } - xbs = d.r.readn(l) - case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - xbs, _ = d.decodeBytes(nil) - default: - decErr("Invalid d.vd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.bd) - } - d.bdRead = false - return -} - -func (d *simpleDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { - d.initReadNext() - - switch d.bd { - case simpleVdNil: - vt = valueTypeNil - case simpleVdFalse: - vt = valueTypeBool - v = false - case simpleVdTrue: - vt = valueTypeBool - v = true - case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: - vt = valueTypeUint - ui, _, _ := d.decIntAny() - v = ui - case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: - vt = valueTypeInt - _, i, _ := d.decIntAny() - v = i - case simpleVdFloat32: - vt = valueTypeFloat - v = d.decodeFloat(true) - case simpleVdFloat64: - vt = valueTypeFloat - v = d.decodeFloat(false) - case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: - vt = valueTypeString - v = d.decodeString() - case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: - vt = valueTypeBytes - v, _ = d.decodeBytes(nil) - case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: - vt = valueTypeExt - l := d.decLen() - var re RawExt - re.Tag = d.r.readn1() - re.Data = d.r.readn(l) - v = &re - vt = valueTypeExt - case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: - vt = valueTypeArray - decodeFurther = true - case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: - vt = valueTypeMap - decodeFurther = true - default: - decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.bd) - } - - if !decodeFurther { - d.bdRead = false - } - return -} - -//------------------------------------ - -// SimpleHandle is a Handle for a very simple encoding format. -// -// simple is a simplistic codec similar to binc, but not as compact. -// - Encoding of a value is always preceeded by the descriptor byte (bd) -// - True, false, nil are encoded fully in 1 byte (the descriptor) -// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). -// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. -// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte) -// - Lenght of containers (strings, bytes, array, map, extensions) -// are encoded in 0, 1, 2, 4 or 8 bytes. -// Zero-length containers have no length encoded. -// For others, the number of bytes is given by pow(2, bd%3) -// - maps are encoded as [bd] [length] [[key][value]]... -// - arrays are encoded as [bd] [length] [value]... -// - extensions are encoded as [bd] [length] [tag] [byte]... -// - strings/bytearrays are encoded as [bd] [length] [byte]... -// -// The full spec will be published soon. 
-type SimpleHandle struct { - BasicHandle -} - -func (h *SimpleHandle) newEncDriver(w encWriter) encDriver { - return &simpleEncDriver{w: w, h: h} -} - -func (h *SimpleHandle) newDecDriver(r decReader) decDriver { - return &simpleDecDriver{r: r, h: h} -} - -func (_ *SimpleHandle) writeExt() bool { - return true -} - -func (h *SimpleHandle) getBasicHandle() *BasicHandle { - return &h.BasicHandle -} - -var _ decDriver = (*simpleDecDriver)(nil) -var _ encDriver = (*simpleEncDriver)(nil) diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/time.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/time.go deleted file mode 100644 index c86d65328..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/time.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -import ( - "time" -) - -var ( - timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'} -) - -// EncodeTime encodes a time.Time as a []byte, including -// information on the instant in time and UTC offset. -// -// Format Description -// -// A timestamp is composed of 3 components: -// -// - secs: signed integer representing seconds since unix epoch -// - nsces: unsigned integer representing fractional seconds as a -// nanosecond offset within secs, in the range 0 <= nsecs < 1e9 -// - tz: signed integer representing timezone offset in minutes east of UTC, -// and a dst (daylight savings time) flag -// -// When encoding a timestamp, the first byte is the descriptor, which -// defines which components are encoded and how many bytes are used to -// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it -// is not encoded in the byte array explicitly*. -// -// Descriptor 8 bits are of the form `A B C DDD EE`: -// A: Is secs component encoded? 1 = true -// B: Is nsecs component encoded? 1 = true -// C: Is tz component encoded? 1 = true -// DDD: Number of extra bytes for secs (range 0-7). -// If A = 1, secs encoded in DDD+1 bytes. -// If A = 0, secs is not encoded, and is assumed to be 0. -// If A = 1, then we need at least 1 byte to encode secs. -// DDD says the number of extra bytes beyond that 1. -// E.g. if DDD=0, then secs is represented in 1 byte. -// if DDD=2, then secs is represented in 3 bytes. -// EE: Number of extra bytes for nsecs (range 0-3). -// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above) -// -// Following the descriptor bytes, subsequent bytes are: -// -// secs component encoded in `DDD + 1` bytes (if A == 1) -// nsecs component encoded in `EE + 1` bytes (if B == 1) -// tz component encoded in 2 bytes (if C == 1) -// -// secs and nsecs components are integers encoded in a BigEndian -// 2-complement encoding format. -// -// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to -// Least significant bit 0 are described below: -// -// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes). -// Bit 15 = have\_dst: set to 1 if we set the dst flag. -// Bit 14 = dst\_on: set to 1 if dst is in effect at the time, or 0 if not. -// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format. 
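[Editor's aside] The descriptor byte documented above packs three presence flags and two width fields into eight bits. A worked decoding of one example descriptor value (0xE4 is arbitrary), mirroring the masks `decodeTime` applies below:

```go
package main

import "fmt"

func main() {
	bd := byte(0xE4) // 1110 0100: A=1, B=1, C=1, DDD=001, EE=00
	hasSecs := bd&(1<<7) != 0
	hasNsecs := bd&(1<<6) != 0
	hasTz := bd&(1<<5) != 0
	secsBytes := int((bd>>2)&0x7) + 1 // DDD+1 = 2 bytes for secs
	nsecsBytes := int(bd&0x3) + 1     // EE+1  = 1 byte for nsecs

	fmt.Println(hasSecs, hasNsecs, hasTz) // true true true
	fmt.Println(secsBytes, nsecsBytes)    // 2 1
	// Total encoded size: 1 (descriptor) + 2 (secs) + 1 (nsecs) + 2 (tz) = 6.
}
```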
-// -func encodeTime(t time.Time) []byte { - //t := rv.Interface().(time.Time) - tsecs, tnsecs := t.Unix(), t.Nanosecond() - var ( - bd byte - btmp [8]byte - bs [16]byte - i int = 1 - ) - l := t.Location() - if l == time.UTC { - l = nil - } - if tsecs != 0 { - bd = bd | 0x80 - bigen.PutUint64(btmp[:], uint64(tsecs)) - f := pruneSignExt(btmp[:], tsecs >= 0) - bd = bd | (byte(7-f) << 2) - copy(bs[i:], btmp[f:]) - i = i + (8 - f) - } - if tnsecs != 0 { - bd = bd | 0x40 - bigen.PutUint32(btmp[:4], uint32(tnsecs)) - f := pruneSignExt(btmp[:4], true) - bd = bd | byte(3-f) - copy(bs[i:], btmp[f:4]) - i = i + (4 - f) - } - if l != nil { - bd = bd | 0x20 - // Note that Go Libs do not give access to dst flag. - _, zoneOffset := t.Zone() - //zoneName, zoneOffset := t.Zone() - zoneOffset /= 60 - z := uint16(zoneOffset) - bigen.PutUint16(btmp[:2], z) - // clear dst flags - bs[i] = btmp[0] & 0x3f - bs[i+1] = btmp[1] - i = i + 2 - } - bs[0] = bd - return bs[0:i] -} - -// DecodeTime decodes a []byte into a time.Time. -func decodeTime(bs []byte) (tt time.Time, err error) { - bd := bs[0] - var ( - tsec int64 - tnsec uint32 - tz uint16 - i byte = 1 - i2 byte - n byte - ) - if bd&(1<<7) != 0 { - var btmp [8]byte - n = ((bd >> 2) & 0x7) + 1 - i2 = i + n - copy(btmp[8-n:], bs[i:i2]) - //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) - if bs[i]&(1<<7) != 0 { - copy(btmp[0:8-n], bsAll0xff) - //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff } - } - i = i2 - tsec = int64(bigen.Uint64(btmp[:])) - } - if bd&(1<<6) != 0 { - var btmp [4]byte - n = (bd & 0x3) + 1 - i2 = i + n - copy(btmp[4-n:], bs[i:i2]) - i = i2 - tnsec = bigen.Uint32(btmp[:]) - } - if bd&(1<<5) == 0 { - tt = time.Unix(tsec, int64(tnsec)).UTC() - return - } - // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. - // However, we need name here, so it can be shown when time is printed. - // Zone name is in form: UTC-08:00. - // Note that Go Libs do not give access to dst flag, so we ignore dst bits - - i2 = i + 2 - tz = bigen.Uint16(bs[i:i2]) - i = i2 - // sign extend sign bit into top 2 MSB (which were dst bits): - if tz&(1<<13) == 0 { // positive - tz = tz & 0x3fff //clear 2 MSBs: dst bits - } else { // negative - tz = tz | 0xc000 //set 2 MSBs: dst bits - //tzname[3] = '-' (TODO: verify. this works here) - } - tzint := int16(tz) - if tzint == 0 { - tt = time.Unix(tsec, int64(tnsec)).UTC() - } else { - // For Go Time, do not use a descriptive timezone. - // It's unnecessary, and makes it harder to do a reflect.DeepEqual. - // The Offset already tells what the offset should be, if not on UTC and unknown zone name. - // var zoneName = timeLocUTCName(tzint) - tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) - } - return -} - -func timeLocUTCName(tzint int16) string { - if tzint == 0 { - return "UTC" - } - var tzname = []byte("UTC+00:00") - //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below. - //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first - var tzhr, tzmin int16 - if tzint < 0 { - tzname[3] = '-' // (TODO: verify. 
this works here) - tzhr, tzmin = -tzint/60, (-tzint)%60 - } else { - tzhr, tzmin = tzint/60, tzint%60 - } - tzname[4] = timeDigits[tzhr/10] - tzname[5] = timeDigits[tzhr%10] - tzname[7] = timeDigits[tzmin/10] - tzname[8] = timeDigits[tzmin%10] - return string(tzname) - //return time.FixedZone(string(tzname), int(tzint)*60) -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/z_helper_test.go b/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/z_helper_test.go deleted file mode 100644 index 2e9b3a0f0..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/go-msgpack/codec/z_helper_test.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a BSD-style license found in the LICENSE file. - -package codec - -// All non-std package dependencies related to testing live in this file, -// so porting to different environment is easy (just update functions). -// -// Also, this file is called z_helper_test, to give a "hint" to compiler -// that its init() function should be called last. (not guaranteed by spec) - -import ( - "errors" - "reflect" - "flag" - "testing" -) - -var ( - testLogToT = true - failNowOnFail = true -) - -func init() { - testInitFlags() - benchInitFlags() - flag.Parse() - testInit() - benchInit() -} - -func checkErrT(t *testing.T, err error) { - if err != nil { - logT(t, err.Error()) - failT(t) - } -} - -func checkEqualT(t *testing.T, v1 interface{}, v2 interface{}, desc string) (err error) { - if err = deepEqual(v1, v2); err != nil { - logT(t, "Not Equal: %s: %v. v1: %v, v2: %v", desc, err, v1, v2) - failT(t) - } - return -} - -func logT(x interface{}, format string, args ...interface{}) { - if t, ok := x.(*testing.T); ok && t != nil && testLogToT { - t.Logf(format, args...) - } else if b, ok := x.(*testing.B); ok && b != nil && testLogToT { - b.Logf(format, args...) - } else { - debugf(format, args...) - } -} - -func failT(t *testing.T) { - if failNowOnFail { - t.FailNow() - } else { - t.Fail() - } -} - -func deepEqual(v1, v2 interface{}) (err error) { - if !reflect.DeepEqual(v1, v2) { - err = errors.New("Not Match") - } - return -} - -func approxDataSize(rv reflect.Value) (sum int) { - switch rk := rv.Kind(); rk { - case reflect.Invalid: - case reflect.Ptr, reflect.Interface: - sum += int(rv.Type().Size()) - sum += approxDataSize(rv.Elem()) - case reflect.Slice: - sum += int(rv.Type().Size()) - for j := 0; j < rv.Len(); j++ { - sum += approxDataSize(rv.Index(j)) - } - case reflect.String: - sum += int(rv.Type().Size()) - sum += rv.Len() - case reflect.Map: - sum += int(rv.Type().Size()) - for _, mk := range rv.MapKeys() { - sum += approxDataSize(mk) - sum += approxDataSize(rv.MapIndex(mk)) - } - case reflect.Struct: - //struct size already includes the full data size. - //sum += int(rv.Type().Size()) - for j := 0; j < rv.NumField(); j++ { - sum += approxDataSize(rv.Field(j)) - } - default: - //pure value types - sum += int(rv.Type().Size()) - } - return -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/LICENSE b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/LICENSE deleted file mode 100644 index f0e5c79e1..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/LICENSE +++ /dev/null @@ -1,362 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. 
"Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. 
Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. 
If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/README.md b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/README.md deleted file mode 100644 index 5d7180ab9..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/README.md +++ /dev/null @@ -1,11 +0,0 @@ -raft-boltdb -=========== - -This repository provides the `raftboltdb` package. The package exports the -`BoltStore` which is an implementation of both a `LogStore` and `StableStore`. - -It is meant to be used as a backend for the `raft` [package -here](https://github.com/hashicorp/raft). - -This implementation uses [BoltDB](https://github.com/boltdb/bolt). BoltDB is -a simple key/value store implemented in pure Go, and inspired by LMDB. 
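[Editor's aside] Since the README above says BoltStore doubles as a `LogStore` and a `StableStore`, a hedged usage sketch of the StableStore half may help; the import alias and database path are assumptions, and the methods themselves appear in bolt_store.go below:

```go
package main

import (
	"fmt"
	"log"

	raftboltdb "github.com/hashicorp/raft-boltdb"
)

func main() {
	store, err := raftboltdb.NewBoltStore("/tmp/raft.db") // assumed path
	if err != nil {
		log.Fatal(err)
	}
	defer store.Close()

	// StableStore usage: raft persists small values like the current term.
	if err := store.SetUint64([]byte("CurrentTerm"), 5); err != nil {
		log.Fatal(err)
	}
	term, err := store.GetUint64([]byte("CurrentTerm"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(term) // 5
}
```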
diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bench_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bench_test.go deleted file mode 100644 index b860706fd..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bench_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package raftboltdb - -import ( - "os" - "testing" - - "github.com/hashicorp/raft/bench" -) - -func BenchmarkBoltStore_FirstIndex(b *testing.B) { - store := testBoltStore(b) - defer store.Close() - defer os.Remove(store.path) - - raftbench.FirstIndex(b, store) -} - -func BenchmarkBoltStore_LastIndex(b *testing.B) { - store := testBoltStore(b) - defer store.Close() - defer os.Remove(store.path) - - raftbench.LastIndex(b, store) -} - -func BenchmarkBoltStore_GetLog(b *testing.B) { - store := testBoltStore(b) - defer store.Close() - defer os.Remove(store.path) - - raftbench.GetLog(b, store) -} - -func BenchmarkBoltStore_StoreLog(b *testing.B) { - store := testBoltStore(b) - defer store.Close() - defer os.Remove(store.path) - - raftbench.StoreLog(b, store) -} - -func BenchmarkBoltStore_StoreLogs(b *testing.B) { - store := testBoltStore(b) - defer store.Close() - defer os.Remove(store.path) - - raftbench.StoreLogs(b, store) -} - -func BenchmarkBoltStore_DeleteRange(b *testing.B) { - store := testBoltStore(b) - defer store.Close() - defer os.Remove(store.path) - - raftbench.DeleteRange(b, store) -} - -func BenchmarkBoltStore_Set(b *testing.B) { - store := testBoltStore(b) - defer store.Close() - defer os.Remove(store.path) - - raftbench.Set(b, store) -} - -func BenchmarkBoltStore_Get(b *testing.B) { - store := testBoltStore(b) - defer store.Close() - defer os.Remove(store.path) - - raftbench.Get(b, store) -} - -func BenchmarkBoltStore_SetUint64(b *testing.B) { - store := testBoltStore(b) - defer store.Close() - defer os.Remove(store.path) - - raftbench.SetUint64(b, store) -} - -func BenchmarkBoltStore_GetUint64(b *testing.B) { - store := testBoltStore(b) - defer store.Close() - defer os.Remove(store.path) - - raftbench.GetUint64(b, store) -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bolt_store.go b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bolt_store.go deleted file mode 100644 index ab6dd4803..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bolt_store.go +++ /dev/null @@ -1,231 +0,0 @@ -package raftboltdb - -import ( - "errors" - - "github.com/boltdb/bolt" - "github.com/hashicorp/raft" -) - -const ( - // Permissions to use on the db file. This is only used if the - // database file does not exist and needs to be created. - dbFileMode = 0600 -) - -var ( - // Bucket names we perform transactions in - dbLogs = []byte("logs") - dbConf = []byte("conf") - - // An error indicating a given key does not exist - ErrKeyNotFound = errors.New("not found") -) - -// BoltStore provides access to BoltDB for Raft to store and retrieve -// log entries. It also provides key/value storage, and can be used as -// a LogStore and StableStore. -type BoltStore struct { - // conn is the underlying handle to the db. - conn *bolt.DB - - // The path to the Bolt database file - path string -} - -// NewBoltStore takes a file path and returns a connected Raft backend. 
-func NewBoltStore(path string) (*BoltStore, error) { - // Try to connect - handle, err := bolt.Open(path, dbFileMode, nil) - if err != nil { - return nil, err - } - - // Create the new store - store := &BoltStore{ - conn: handle, - path: path, - } - - // Set up our buckets - if err := store.initialize(); err != nil { - store.Close() - return nil, err - } - - return store, nil -} - -// initialize is used to set up all of the buckets. -func (b *BoltStore) initialize() error { - tx, err := b.conn.Begin(true) - if err != nil { - return err - } - defer tx.Rollback() - - // Create all the buckets - if _, err := tx.CreateBucketIfNotExists(dbLogs); err != nil { - return err - } - if _, err := tx.CreateBucketIfNotExists(dbConf); err != nil { - return err - } - - return tx.Commit() -} - -// Close is used to gracefully close the DB connection. -func (b *BoltStore) Close() error { - return b.conn.Close() -} - -// FirstIndex returns the first known index from the Raft log. -func (b *BoltStore) FirstIndex() (uint64, error) { - tx, err := b.conn.Begin(false) - if err != nil { - return 0, err - } - defer tx.Rollback() - - curs := tx.Bucket(dbLogs).Cursor() - if first, _ := curs.First(); first == nil { - return 0, nil - } else { - return bytesToUint64(first), nil - } -} - -// LastIndex returns the last known index from the Raft log. -func (b *BoltStore) LastIndex() (uint64, error) { - tx, err := b.conn.Begin(false) - if err != nil { - return 0, err - } - defer tx.Rollback() - - curs := tx.Bucket(dbLogs).Cursor() - if last, _ := curs.Last(); last == nil { - return 0, nil - } else { - return bytesToUint64(last), nil - } -} - -// GetLog is used to retrieve a log from BoltDB at a given index. -func (b *BoltStore) GetLog(idx uint64, log *raft.Log) error { - tx, err := b.conn.Begin(false) - if err != nil { - return err - } - defer tx.Rollback() - - bucket := tx.Bucket(dbLogs) - val := bucket.Get(uint64ToBytes(idx)) - - if val == nil { - return raft.ErrLogNotFound - } - return decodeMsgPack(val, log) -} - -// StoreLog is used to store a single raft log -func (b *BoltStore) StoreLog(log *raft.Log) error { - return b.StoreLogs([]*raft.Log{log}) -} - -// StoreLogs is used to store a set of raft logs -func (b *BoltStore) StoreLogs(logs []*raft.Log) error { - tx, err := b.conn.Begin(true) - if err != nil { - return err - } - defer tx.Rollback() - - for _, log := range logs { - key := uint64ToBytes(log.Index) - val, err := encodeMsgPack(log) - if err != nil { - return err - } - bucket := tx.Bucket(dbLogs) - if err := bucket.Put(key, val.Bytes()); err != nil { - return err - } - } - - return tx.Commit() -} - -// DeleteRange is used to delete logs within a given range inclusively. 
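[Editor's aside] Taken together, the methods above and below give the raft.LogStore flow: append entries, query the index bounds, fetch one back. A sketch under the assumption of a throwaway database path and hand-built entries:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/raft"
	raftboltdb "github.com/hashicorp/raft-boltdb"
)

func main() {
	store, err := raftboltdb.NewBoltStore("/tmp/raft-logs.db") // assumed path
	if err != nil {
		log.Fatal(err)
	}
	defer store.Close()

	// Append two entries in one transaction via StoreLogs.
	logs := []*raft.Log{
		{Index: 1, Term: 1, Data: []byte("a")},
		{Index: 2, Term: 1, Data: []byte("b")},
	}
	if err := store.StoreLogs(logs); err != nil {
		log.Fatal(err)
	}

	first, _ := store.FirstIndex()
	last, _ := store.LastIndex()
	fmt.Println(first, last) // 1 2

	var out raft.Log
	if err := store.GetLog(2, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", out.Data) // b
}
```

The design choice that makes this work sits in util.go further down: log indexes become keys via `uint64ToBytes`, i.e. big-endian, so BoltDB's byte-ordered B+tree iterates entries in index order — exactly what FirstIndex/LastIndex (Cursor.First/Last) and DeleteRange's sequential scan rely on.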
-func (b *BoltStore) DeleteRange(min, max uint64) error { - minKey := uint64ToBytes(min) - - tx, err := b.conn.Begin(true) - if err != nil { - return err - } - defer tx.Rollback() - - curs := tx.Bucket(dbLogs).Cursor() - for k, _ := curs.Seek(minKey); k != nil; k, _ = curs.Next() { - // Handle out-of-range log index - if bytesToUint64(k) > max { - break - } - - // Delete in-range log index - if err := curs.Delete(); err != nil { - return err - } - } - - return tx.Commit() -} - -// Set is used to set a key/value set outside of the raft log -func (b *BoltStore) Set(k, v []byte) error { - tx, err := b.conn.Begin(true) - if err != nil { - return err - } - defer tx.Rollback() - - bucket := tx.Bucket(dbConf) - if err := bucket.Put(k, v); err != nil { - return err - } - - return tx.Commit() -} - -// Get is used to retrieve a value from the k/v store by key -func (b *BoltStore) Get(k []byte) ([]byte, error) { - tx, err := b.conn.Begin(false) - if err != nil { - return nil, err - } - defer tx.Rollback() - - bucket := tx.Bucket(dbConf) - val := bucket.Get(k) - - if val == nil { - return nil, ErrKeyNotFound - } - return append([]byte{}, val...), nil -} - -// SetUint64 is like Set, but handles uint64 values -func (b *BoltStore) SetUint64(key []byte, val uint64) error { - return b.Set(key, uint64ToBytes(val)) -} - -// GetUint64 is like Get, but handles uint64 values -func (b *BoltStore) GetUint64(key []byte) (uint64, error) { - val, err := b.Get(key) - if err != nil { - return 0, err - } - return bytesToUint64(val), nil -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bolt_store_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bolt_store_test.go deleted file mode 100644 index ab2a1525c..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/bolt_store_test.go +++ /dev/null @@ -1,332 +0,0 @@ -package raftboltdb - -import ( - "bytes" - "io/ioutil" - "os" - "reflect" - "testing" - - "github.com/boltdb/bolt" - "github.com/hashicorp/raft" -) - -func testBoltStore(t testing.TB) *BoltStore { - fh, err := ioutil.TempFile("", "bolt") - if err != nil { - t.Fatalf("err: %s", err) - } - os.Remove(fh.Name()) - - // Successfully creates and returns a store - store, err := NewBoltStore(fh.Name()) - if err != nil { - t.Fatalf("err: %s", err) - } - - return store -} - -func testRaftLog(idx uint64, data string) *raft.Log { - return &raft.Log{ - Data: []byte(data), - Index: idx, - } -} - -func TestBoltStore_Implements(t *testing.T) { - var store interface{} = &BoltStore{} - if _, ok := store.(raft.StableStore); !ok { - t.Fatalf("BoltStore does not implement raft.StableStore") - } - if _, ok := store.(raft.LogStore); !ok { - t.Fatalf("BoltStore does not implement raft.LogStore") - } -} - -func TestNewBoltStore(t *testing.T) { - fh, err := ioutil.TempFile("", "bolt") - if err != nil { - t.Fatalf("err: %s", err) - } - os.Remove(fh.Name()) - defer os.Remove(fh.Name()) - - // Successfully creates and returns a store - store, err := NewBoltStore(fh.Name()) - if err != nil { - t.Fatalf("err: %s", err) - } - - // Ensure the file was created - if store.path != fh.Name() { - t.Fatalf("unexpected file path %q", store.path) - } - if _, err := os.Stat(fh.Name()); err != nil { - t.Fatalf("err: %s", err) - } - - // Close the store so we can open again - if err := store.Close(); err != nil { - t.Fatalf("err: %s", err) - } - - // Ensure our tables were created - db, err := bolt.Open(fh.Name(), dbFileMode, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - tx, err := 
db.Begin(true) - if err != nil { - t.Fatalf("err: %s", err) - } - if _, err := tx.CreateBucket([]byte(dbLogs)); err != bolt.ErrBucketExists { - t.Fatalf("bad: %v", err) - } - if _, err := tx.CreateBucket([]byte(dbConf)); err != bolt.ErrBucketExists { - t.Fatalf("bad: %v", err) - } -} - -func TestBoltStore_FirstIndex(t *testing.T) { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - // Should get 0 index on empty log - idx, err := store.FirstIndex() - if err != nil { - t.Fatalf("err: %s", err) - } - if idx != 0 { - t.Fatalf("bad: %v", idx) - } - - // Set a mock raft log - logs := []*raft.Log{ - testRaftLog(1, "log1"), - testRaftLog(2, "log2"), - testRaftLog(3, "log3"), - } - if err := store.StoreLogs(logs); err != nil { - t.Fatalf("bad: %s", err) - } - - // Fetch the first Raft index - idx, err = store.FirstIndex() - if err != nil { - t.Fatalf("err: %s", err) - } - if idx != 1 { - t.Fatalf("bad: %d", idx) - } -} - -func TestBoltStore_LastIndex(t *testing.T) { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - // Should get 0 index on empty log - idx, err := store.LastIndex() - if err != nil { - t.Fatalf("err: %s", err) - } - if idx != 0 { - t.Fatalf("bad: %v", idx) - } - - // Set a mock raft log - logs := []*raft.Log{ - testRaftLog(1, "log1"), - testRaftLog(2, "log2"), - testRaftLog(3, "log3"), - } - if err := store.StoreLogs(logs); err != nil { - t.Fatalf("bad: %s", err) - } - - // Fetch the last Raft index - idx, err = store.LastIndex() - if err != nil { - t.Fatalf("err: %s", err) - } - if idx != 3 { - t.Fatalf("bad: %d", idx) - } -} - -func TestBoltStore_GetLog(t *testing.T) { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - log := new(raft.Log) - - // Should return an error on non-existent log - if err := store.GetLog(1, log); err != raft.ErrLogNotFound { - t.Fatalf("expected raft log not found error, got: %v", err) - } - - // Set a mock raft log - logs := []*raft.Log{ - testRaftLog(1, "log1"), - testRaftLog(2, "log2"), - testRaftLog(3, "log3"), - } - if err := store.StoreLogs(logs); err != nil { - t.Fatalf("bad: %s", err) - } - - // Should return the proper log - if err := store.GetLog(2, log); err != nil { - t.Fatalf("err: %s", err) - } - if !reflect.DeepEqual(log, logs[1]) { - t.Fatalf("bad: %#v", log) - } -} - -func TestBoltStore_SetLog(t *testing.T) { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - // Create the log - log := &raft.Log{ - Data: []byte("log1"), - Index: 1, - } - - // Attempt to store the log - if err := store.StoreLog(log); err != nil { - t.Fatalf("err: %s", err) - } - - // Retrieve the log again - result := new(raft.Log) - if err := store.GetLog(1, result); err != nil { - t.Fatalf("err: %s", err) - } - - // Ensure the log comes back the same - if !reflect.DeepEqual(log, result) { - t.Fatalf("bad: %v", result) - } -} - -func TestBoltStore_SetLogs(t *testing.T) { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - // Create a set of logs - logs := []*raft.Log{ - testRaftLog(1, "log1"), - testRaftLog(2, "log2"), - } - - // Attempt to store the logs - if err := store.StoreLogs(logs); err != nil { - t.Fatalf("err: %s", err) - } - - // Ensure we stored them all - result1, result2 := new(raft.Log), new(raft.Log) - if err := store.GetLog(1, result1); err != nil { - t.Fatalf("err: %s", err) - } - if !reflect.DeepEqual(logs[0], result1) { - t.Fatalf("bad: %#v", result1) - } - if err := store.GetLog(2, 
result2); err != nil { - t.Fatalf("err: %s", err) - } - if !reflect.DeepEqual(logs[1], result2) { - t.Fatalf("bad: %#v", result2) - } -} - -func TestBoltStore_DeleteRange(t *testing.T) { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - // Create a set of logs - log1 := testRaftLog(1, "log1") - log2 := testRaftLog(2, "log2") - log3 := testRaftLog(3, "log3") - logs := []*raft.Log{log1, log2, log3} - - // Attempt to store the logs - if err := store.StoreLogs(logs); err != nil { - t.Fatalf("err: %s", err) - } - - // Attempt to delete a range of logs - if err := store.DeleteRange(1, 2); err != nil { - t.Fatalf("err: %s", err) - } - - // Ensure the logs were deleted - if err := store.GetLog(1, new(raft.Log)); err != raft.ErrLogNotFound { - t.Fatalf("should have deleted log1") - } - if err := store.GetLog(2, new(raft.Log)); err != raft.ErrLogNotFound { - t.Fatalf("should have deleted log2") - } -} - -func TestBoltStore_Set_Get(t *testing.T) { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - // Returns error on non-existent key - if _, err := store.Get([]byte("bad")); err != ErrKeyNotFound { - t.Fatalf("expected not found error, got: %q", err) - } - - k, v := []byte("hello"), []byte("world") - - // Try to set a k/v pair - if err := store.Set(k, v); err != nil { - t.Fatalf("err: %s", err) - } - - // Try to read it back - val, err := store.Get(k) - if err != nil { - t.Fatalf("err: %s", err) - } - if !bytes.Equal(val, v) { - t.Fatalf("bad: %v", val) - } -} - -func TestBoltStore_SetUint64_GetUint64(t *testing.T) { - store := testBoltStore(t) - defer store.Close() - defer os.Remove(store.path) - - // Returns error on non-existent key - if _, err := store.GetUint64([]byte("bad")); err != ErrKeyNotFound { - t.Fatalf("expected not found error, got: %q", err) - } - - k, v := []byte("abc"), uint64(123) - - // Attempt to set the k/v pair - if err := store.SetUint64(k, v); err != nil { - t.Fatalf("err: %s", err) - } - - // Read back the value - val, err := store.GetUint64(k) - if err != nil { - t.Fatalf("err: %s", err) - } - if val != v { - t.Fatalf("bad: %v", val) - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/util.go b/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/util.go deleted file mode 100644 index 68dd786b7..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft-boltdb/util.go +++ /dev/null @@ -1,37 +0,0 @@ -package raftboltdb - -import ( - "bytes" - "encoding/binary" - - "github.com/hashicorp/go-msgpack/codec" -) - -// Decode reverses the encode operation on a byte slice input -func decodeMsgPack(buf []byte, out interface{}) error { - r := bytes.NewBuffer(buf) - hd := codec.MsgpackHandle{} - dec := codec.NewDecoder(r, &hd) - return dec.Decode(out) -} - -// Encode writes an encoded object to a new bytes buffer -func encodeMsgPack(in interface{}) (*bytes.Buffer, error) { - buf := bytes.NewBuffer(nil) - hd := codec.MsgpackHandle{} - enc := codec.NewEncoder(buf, &hd) - err := enc.Encode(in) - return buf, err -} - -// Converts bytes to an integer -func bytesToUint64(b []byte) uint64 { - return binary.BigEndian.Uint64(b) -} - -// Converts a uint to a byte slice -func uint64ToBytes(u uint64) []byte { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, u) - return buf -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/.gitignore b/Godeps/_workspace/src/github.com/hashicorp/raft/.gitignore deleted file mode 100644 index 836562412..000000000 --- 
a/Godeps/_workspace/src/github.com/hashicorp/raft/.gitignore +++ /dev/null @@ -1,23 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/.travis.yml b/Godeps/_workspace/src/github.com/hashicorp/raft/.travis.yml deleted file mode 100644 index 5cf041d26..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go - -go: - - 1.2 - - tip - -install: make deps -script: - - make integ - -notifications: - flowdock: - secure: fZrcf9rlh2IrQrlch1sHkn3YI7SKvjGnAl/zyV5D6NROe1Bbr6d3QRMuCXWWdhJHzjKmXk5rIzbqJhUc0PNF7YjxGNKSzqWMQ56KcvN1k8DzlqxpqkcA3Jbs6fXCWo2fssRtZ7hj/wOP1f5n6cc7kzHDt9dgaYJ6nO2fqNPJiTc= - diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/LICENSE b/Godeps/_workspace/src/github.com/hashicorp/raft/LICENSE deleted file mode 100644 index c33dcc7c9..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. 
“Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. 
Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. 
Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. 
Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/Makefile b/Godeps/_workspace/src/github.com/hashicorp/raft/Makefile deleted file mode 100644 index c61b34a8f..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/Makefile +++ /dev/null @@ -1,17 +0,0 @@ -DEPS = $(go list -f '{{range .TestImports}}{{.}} {{end}}' ./...) - -test: - go test -timeout=5s ./... - -integ: test - INTEG_TESTS=yes go test -timeout=3s -run=Integ ./... - -deps: - go get -d -v ./... 
- echo $(DEPS) | xargs -n1 go get -d - -cov: - INTEG_TESTS=yes gocov test github.com/hashicorp/raft | gocov-html > /tmp/coverage.html - open /tmp/coverage.html - -.PHONY: test cov integ deps diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/README.md b/Godeps/_workspace/src/github.com/hashicorp/raft/README.md deleted file mode 100644 index ecb6c977e..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/README.md +++ /dev/null @@ -1,89 +0,0 @@ -raft [![Build Status](https://travis-ci.org/hashicorp/raft.png)](https://travis-ci.org/hashicorp/raft) -==== - -raft is a [Go](http://www.golang.org) library that manages a replicated -log and can be used with an FSM to manage replicated state machines. It -is a library for providing [consensus](http://en.wikipedia.org/wiki/Consensus_(computer_science)). - -The use cases for such a library are far-reaching as replicated state -machines are a key component of many distributed systems. They enable -building Consistent, Partition Tolerant (CP) systems, with limited -fault tolerance as well. - -## Building - -If you wish to build raft you'll need Go version 1.2+ installed. - -Please check your installation with: - -``` -go version -``` - -## Documentation - -For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/raft). - -To prevent complications with cgo, the primary backend `MDBStore` is in a separate repository, -called [raft-mdb](http://github.com/hashicorp/raft-mdb). That is the recommended implementation -for the `LogStore` and `StableStore`. - -A pure Go backend using [BoltDB](https://github.com/boltdb/bolt) is also available, called -[raft-boltdb](https://github.com/hashicorp/raft-boltdb). It can also be used as a `LogStore` -and `StableStore`. - -## Protocol - -raft is based on ["Raft: In Search of an Understandable Consensus Algorithm"](https://ramcloud.stanford.edu/wiki/download/attachments/11370504/raft.pdf) - -A high-level overview of the Raft protocol is described below, but for details please read the full -[Raft paper](https://ramcloud.stanford.edu/wiki/download/attachments/11370504/raft.pdf) -followed by the raft source. Any questions about the raft protocol should be sent to the -[raft-dev mailing list](https://groups.google.com/forum/#!forum/raft-dev). - -### Protocol Description - -Raft nodes are always in one of three states: follower, candidate, or leader. All -nodes initially start out as a follower. In this state, nodes can accept log entries -from a leader and cast votes. If no entries are received for some time, nodes -self-promote to the candidate state. In the candidate state, nodes request votes from -their peers. If a candidate receives a quorum of votes, then it is promoted to a leader. -The leader must accept new log entries and replicate them to all the other followers. -In addition, if stale reads are not acceptable, all queries must also be performed on -the leader. - -Once a cluster has a leader, it is able to accept new log entries. A client can -request that a leader append a new log entry, which is an opaque binary blob to -Raft. The leader then writes the entry to durable storage and attempts to replicate it -to a quorum of followers. Once the log entry is considered *committed*, it can be -*applied* to a finite state machine. The finite state machine is application-specific, -and is implemented using an interface. - -An obvious question relates to the unbounded nature of a replicated log.
Raft provides -a mechanism by which the current state is snapshotted, and the log is compacted. Because -of the FSM abstraction, restoring the state of the FSM must result in the same state -as a replay of old logs. This allows Raft to capture the FSM state at a point in time, -and then remove all the logs that were used to reach that state. This is performed automatically -without user intervention, prevents unbounded disk usage, and minimizes -time spent replaying logs. - -Lastly, there is the issue of updating the peer set when new servers are joining -or existing servers are leaving. As long as a quorum of nodes is available, this -is not an issue as Raft provides mechanisms to dynamically update the peer set. -If a quorum of nodes is unavailable, then this becomes a very challenging issue. -For example, suppose there are only 2 peers, A and B. The quorum size is also -2, meaning both nodes must agree to commit a log entry. If either A or B fails, -it is now impossible to reach quorum. This means the cluster is unable to add -or remove a node, or commit any additional log entries. This results in *unavailability*. -At this point, manual intervention would be required to remove either A or B, -and to restart the remaining node in bootstrap mode. - -A Raft cluster of 3 nodes can tolerate a single node failure, while a cluster -of 5 can tolerate 2 node failures. The recommended configuration is to run -either 3 or 5 raft servers. This maximizes availability without -greatly sacrificing performance. - -In terms of performance, Raft is comparable to Paxos. Assuming stable leadership, -committing a log entry requires a single round trip to half of the cluster. -Thus performance is bound by disk I/O and network latency. - diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/bench/bench.go b/Godeps/_workspace/src/github.com/hashicorp/raft/bench/bench.go deleted file mode 100644 index d7a58f45f..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/bench/bench.go +++ /dev/null @@ -1,171 +0,0 @@ -package raftbench - -// raftbench provides common benchmarking functions that can be used by -// anything that implements the raft.LogStore and raft.StableStore interfaces. -// All functions accept these interfaces and perform benchmarking. This -// makes comparing backend performance easier by sharing the tests.
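A store implementation typically wires these shared helpers into thin `Benchmark*` functions of its own. A minimal sketch, assuming a hypothetical `newTestStore` helper that returns a fresh store satisfying `raft.LogStore` (the helper and its `Close` method are not part of this package):

```go
package mystore

import (
	"testing"

	raftbench "github.com/hashicorp/raft/bench"
)

// BenchmarkFirstIndex delegates to the shared raftbench helper so that
// timings are directly comparable across LogStore backends.
func BenchmarkFirstIndex(b *testing.B) {
	store := newTestStore(b) // hypothetical: builds a store backed by a temp file
	defer store.Close()      // hypothetical: concrete stores usually expose Close
	raftbench.FirstIndex(b, store)
}

func BenchmarkStoreLogs(b *testing.B) {
	store := newTestStore(b)
	defer store.Close()
	raftbench.StoreLogs(b, store)
}
```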
- -import ( - "github.com/hashicorp/raft" - "testing" -) - -func FirstIndex(b *testing.B, store raft.LogStore) { - // Create some fake data - var logs []*raft.Log - for i := 1; i < 10; i++ { - logs = append(logs, &raft.Log{Index: uint64(i), Data: []byte("data")}) - } - if err := store.StoreLogs(logs); err != nil { - b.Fatalf("err: %s", err) - } - b.ResetTimer() - - // Run FirstIndex a number of times - for n := 0; n < b.N; n++ { - store.FirstIndex() - } -} - -func LastIndex(b *testing.B, store raft.LogStore) { - // Create some fake data - var logs []*raft.Log - for i := 1; i < 10; i++ { - logs = append(logs, &raft.Log{Index: uint64(i), Data: []byte("data")}) - } - if err := store.StoreLogs(logs); err != nil { - b.Fatalf("err: %s", err) - } - b.ResetTimer() - - // Run LastIndex a number of times - for n := 0; n < b.N; n++ { - store.LastIndex() - } -} - -func GetLog(b *testing.B, store raft.LogStore) { - // Create some fake data - var logs []*raft.Log - for i := 1; i < 10; i++ { - logs = append(logs, &raft.Log{Index: uint64(i), Data: []byte("data")}) - } - if err := store.StoreLogs(logs); err != nil { - b.Fatalf("err: %s", err) - } - b.ResetTimer() - - // Run GetLog a number of times - for n := 0; n < b.N; n++ { - if err := store.GetLog(5, new(raft.Log)); err != nil { - b.Fatalf("err: %s", err) - } - } -} - -func StoreLog(b *testing.B, store raft.LogStore) { - // Run StoreLog a number of times - for n := 0; n < b.N; n++ { - log := &raft.Log{Index: uint64(n), Data: []byte("data")} - if err := store.StoreLog(log); err != nil { - b.Fatalf("err: %s", err) - } - } -} - -func StoreLogs(b *testing.B, store raft.LogStore) { - // Run StoreLogs a number of times. We want to set multiple logs each - // run, so we create 3 logs with incrementing indexes for each iteration. - for n := 0; n < b.N; n++ { - b.StopTimer() - offset := 3 * (n + 1) - logs := []*raft.Log{ - &raft.Log{Index: uint64(offset - 2), Data: []byte("data")}, - &raft.Log{Index: uint64(offset - 1), Data: []byte("data")}, - &raft.Log{Index: uint64(offset), Data: []byte("data")}, - } - b.StartTimer() - - if err := store.StoreLogs(logs); err != nil { - b.Fatalf("err: %s", err) - } - } -} - -func DeleteRange(b *testing.B, store raft.LogStore) { - // Create some fake data. In this case, we create 3 new log entries for each - // test case, and separate them by index in multiples of 10. This allows - // some room so that we can test deleting ranges with "extra" logs - // to ensure we stop going to the database once our max index is hit.
- var logs []*raft.Log - for n := 0; n < b.N; n++ { - offset := 10 * n - for i := offset; i < offset+3; i++ { - logs = append(logs, &raft.Log{Index: uint64(i), Data: []byte("data")}) - } - } - if err := store.StoreLogs(logs); err != nil { - b.Fatalf("err: %s", err) - } - b.ResetTimer() - - // Delete a range of the data - for n := 0; n < b.N; n++ { - offset := 10 * n - if err := store.DeleteRange(uint64(offset), uint64(offset+9)); err != nil { - b.Fatalf("err: %s", err) - } - } -} - -func Set(b *testing.B, store raft.StableStore) { - // Run Set a number of times - for n := 0; n < b.N; n++ { - if err := store.Set([]byte{byte(n)}, []byte("val")); err != nil { - b.Fatalf("err: %s", err) - } - } -} - -func Get(b *testing.B, store raft.StableStore) { - // Create some fake data - for i := 1; i < 10; i++ { - if err := store.Set([]byte{byte(i)}, []byte("val")); err != nil { - b.Fatalf("err: %s", err) - } - } - b.ResetTimer() - - // Run Get a number of times - for n := 0; n < b.N; n++ { - if _, err := store.Get([]byte{0x05}); err != nil { - b.Fatalf("err: %s", err) - } - } -} - -func SetUint64(b *testing.B, store raft.StableStore) { - // Run SetUint64 a number of times - for n := 0; n < b.N; n++ { - if err := store.SetUint64([]byte{byte(n)}, uint64(n)); err != nil { - b.Fatalf("err: %s", err) - } - } -} - -func GetUint64(b *testing.B, store raft.StableStore) { - // Create some fake data - for i := 0; i < 10; i++ { - if err := store.SetUint64([]byte{byte(i)}, uint64(i)); err != nil { - b.Fatalf("err: %s", err) - } - } - b.ResetTimer() - - // Run GetUint64 a number of times - for n := 0; n < b.N; n++ { - if _, err := store.GetUint64([]byte{0x05}); err != nil { - b.Fatalf("err: %s", err) - } - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/commands.go b/Godeps/_workspace/src/github.com/hashicorp/raft/commands.go deleted file mode 100644 index fd0194841..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/commands.go +++ /dev/null @@ -1,80 +0,0 @@ -package raft - -// AppendEntriesRequest is the command used to append entries to the -// replicated log. -type AppendEntriesRequest struct { - // Provide the current term and leader - Term uint64 - Leader []byte - - // Provide the previous entries for integrity checking - PrevLogEntry uint64 - PrevLogTerm uint64 - - // New entries to commit - Entries []*Log - - // Commit index on the leader - LeaderCommitIndex uint64 -} - -// AppendEntriesResponse is the response returned from an -// AppendEntriesRequest. -type AppendEntriesResponse struct { - // Newer term if leader is out of date - Term uint64 - - // Last Log is a hint to help accelerate rebuilding slow nodes - LastLog uint64 - - // We may not succeed if we have a conflicting entry - Success bool -} - -// RequestVoteRequest is the command used by a candidate to ask a Raft peer -// for a vote in an election. -type RequestVoteRequest struct { - // Provide the term and our id - Term uint64 - Candidate []byte - - // Used to ensure safety - LastLogIndex uint64 - LastLogTerm uint64 -} - -// RequestVoteResponse is the response returned from a RequestVoteRequest. -type RequestVoteResponse struct { - // Newer term if leader is out of date - Term uint64 - - // Return the peers, so that a node can shutdown on removal - Peers []byte - - // Is the vote granted - Granted bool -} - -// InstallSnapshotRequest is the command sent to a Raft peer to bootstrap its -// log (and state machine) from a snapshot on another peer.
-type InstallSnapshotRequest struct { - Term uint64 - Leader []byte - - // These are the last index/term included in the snapshot - LastLogIndex uint64 - LastLogTerm uint64 - - // Peer Set in the snapshot - Peers []byte - - // Size of the snapshot - Size int64 -} - -// InstallSnapshotResponse is the response returned from an -// InstallSnapshotRequest. -type InstallSnapshotResponse struct { - Term uint64 - Success bool -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/config.go b/Godeps/_workspace/src/github.com/hashicorp/raft/config.go deleted file mode 100644 index 047a88abe..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/config.go +++ /dev/null @@ -1,125 +0,0 @@ -package raft - -import ( - "fmt" - "io" - "log" - "time" -) - -// Config provides any necessary configuration to -// the Raft server. -type Config struct { - // Time in follower state without a leader before we attempt an election. - HeartbeatTimeout time.Duration - - // Time in candidate state without a leader before we attempt an election. - ElectionTimeout time.Duration - - // Time without an Apply() operation before we heartbeat to ensure - // a timely commit. Due to random staggering, may be delayed by as much as - // 2x this value. - CommitTimeout time.Duration - - // MaxAppendEntries controls the maximum number of append entries - // to send at once. We want to strike a balance between efficiency - // and avoiding waste if the follower is going to reject because of - // an inconsistent log. - MaxAppendEntries int - - // If we are a member of a cluster, and RemovePeer is invoked for the - // local node, then we forget all peers and transition into the follower state. - // If ShutdownOnRemove is set, we additionally shut down Raft. Otherwise, - // we can become a leader of a cluster containing only this node. - ShutdownOnRemove bool - - // DisableBootstrapAfterElect is used to turn off EnableSingleNode - // after the node is elected. This is used to prevent self-election - // if the node is removed from the Raft cluster via RemovePeer. Setting - // it to false will keep the bootstrap mode, allowing the node to self-elect - // and potentially bootstrap a separate cluster. - DisableBootstrapAfterElect bool - - // TrailingLogs controls how many logs we leave after a snapshot. This is - // used so that we can quickly replay logs on a follower instead of being - // forced to send an entire snapshot. - TrailingLogs uint64 - - // SnapshotInterval controls how often we check if we should perform a snapshot. - // We randomly stagger between this value and 2x this value to keep the entire - // cluster from performing a snapshot at once. - SnapshotInterval time.Duration - - // SnapshotThreshold controls how many outstanding logs there must be before - // we perform a snapshot. This is to prevent excessive snapshots when we can - // just replay a small set of logs. - SnapshotThreshold uint64 - - // EnableSingleNode allows for a single node mode of operation. This - // is false by default, which prevents a lone node from electing itself - // leader. - EnableSingleNode bool - - // LeaderLeaseTimeout is used to control how long the "lease" lasts - // for being the leader without being able to contact a quorum - // of nodes. If we reach this interval without contact, we will - // step down as leader. - LeaderLeaseTimeout time.Duration - - // LogOutput is used as a sink for logs, unless Logger is specified. - // Defaults to os.Stderr. - LogOutput io.Writer - - // Logger is a user-provided logger.
If nil, a logger writing to LogOutput - // is used. - Logger *log.Logger -} - -// DefaultConfig returns a Config with usable defaults. -func DefaultConfig() *Config { - return &Config{ - HeartbeatTimeout: 1000 * time.Millisecond, - ElectionTimeout: 1000 * time.Millisecond, - CommitTimeout: 50 * time.Millisecond, - MaxAppendEntries: 64, - ShutdownOnRemove: true, - DisableBootstrapAfterElect: true, - TrailingLogs: 10240, - SnapshotInterval: 120 * time.Second, - SnapshotThreshold: 8192, - EnableSingleNode: false, - LeaderLeaseTimeout: 500 * time.Millisecond, - } -} - -// ValidateConfig is used to validate a sane configuration -func ValidateConfig(config *Config) error { - if config.HeartbeatTimeout < 5*time.Millisecond { - return fmt.Errorf("Heartbeat timeout is too low") - } - if config.ElectionTimeout < 5*time.Millisecond { - return fmt.Errorf("Election timeout is too low") - } - if config.CommitTimeout < time.Millisecond { - return fmt.Errorf("Commit timeout is too low") - } - if config.MaxAppendEntries <= 0 { - return fmt.Errorf("MaxAppendEntries must be positive") - } - if config.MaxAppendEntries > 1024 { - return fmt.Errorf("MaxAppendEntries is too large") - } - if config.SnapshotInterval < 5*time.Millisecond { - return fmt.Errorf("Snapshot interval is too low") - } - if config.LeaderLeaseTimeout < 5*time.Millisecond { - return fmt.Errorf("Leader lease timeout is too low") - } - if config.LeaderLeaseTimeout > config.HeartbeatTimeout { - return fmt.Errorf("Leader lease timeout cannot be larger than heartbeat timeout") - } - if config.ElectionTimeout < config.HeartbeatTimeout { - return fmt.Errorf("Election timeout must be equal or greater than Heartbeat Timeout") - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/discard_snapshot.go b/Godeps/_workspace/src/github.com/hashicorp/raft/discard_snapshot.go deleted file mode 100644 index 1b4611d55..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/discard_snapshot.go +++ /dev/null @@ -1,48 +0,0 @@ -package raft - -import ( - "fmt" - "io" -) - -// DiscardSnapshotStore is used to successfully snapshot while -// always discarding the snapshot. This is useful for when the -// log should be truncated but no snapshot should be retained. -// This should never be used for production use, and is only -// suitable for testing. -type DiscardSnapshotStore struct{} - -type DiscardSnapshotSink struct{} - -// NewDiscardSnapshotStore is used to create a new DiscardSnapshotStore. 
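The bounds enforced by `ValidateConfig` pin down how the timeouts relate; a minimal sketch of the intended usage, starting from `DefaultConfig` and overriding a few fields (the values are illustrative only):

```go
package main

import (
	"log"
	"time"

	"github.com/hashicorp/raft"
)

func main() {
	// Start from the library defaults and override selectively.
	config := raft.DefaultConfig()
	config.HeartbeatTimeout = 500 * time.Millisecond
	// ElectionTimeout must be >= HeartbeatTimeout, and the default
	// LeaderLeaseTimeout (500ms) must not exceed HeartbeatTimeout.
	config.ElectionTimeout = 500 * time.Millisecond
	config.SnapshotThreshold = 16384

	if err := raft.ValidateConfig(config); err != nil {
		log.Fatalf("invalid raft config: %v", err)
	}
}
```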
-func NewDiscardSnapshotStore() *DiscardSnapshotStore { - return &DiscardSnapshotStore{} -} - -func (d *DiscardSnapshotStore) Create(index, term uint64, peers []byte) (SnapshotSink, error) { - return &DiscardSnapshotSink{}, nil -} - -func (d *DiscardSnapshotStore) List() ([]*SnapshotMeta, error) { - return nil, nil -} - -func (d *DiscardSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, error) { - return nil, nil, fmt.Errorf("open is not supported") -} - -func (d *DiscardSnapshotSink) Write(b []byte) (int, error) { - return len(b), nil -} - -func (d *DiscardSnapshotSink) Close() error { - return nil -} - -func (d *DiscardSnapshotSink) ID() string { - return "discard" -} - -func (d *DiscardSnapshotSink) Cancel() error { - return nil -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/discard_snapshot_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft/discard_snapshot_test.go deleted file mode 100644 index 5abedfe2c..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/discard_snapshot_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package raft - -import "testing" - -func TestDiscardSnapshotStoreImpl(t *testing.T) { - var impl interface{} = &DiscardSnapshotStore{} - if _, ok := impl.(SnapshotStore); !ok { - t.Fatalf("DiscardSnapshotStore not a SnapshotStore") - } -} - -func TestDiscardSnapshotSinkImpl(t *testing.T) { - var impl interface{} = &DiscardSnapshotSink{} - if _, ok := impl.(SnapshotSink); !ok { - t.Fatalf("DiscardSnapshotSink not a SnapshotSink") - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/file_snapshot.go b/Godeps/_workspace/src/github.com/hashicorp/raft/file_snapshot.go deleted file mode 100644 index bda3d6d8e..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/file_snapshot.go +++ /dev/null @@ -1,460 +0,0 @@ -package raft - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "hash" - "hash/crc64" - "io" - "io/ioutil" - "log" - "os" - "path/filepath" - "sort" - "strings" - "time" -) - -const ( - testPath = "permTest" - snapPath = "snapshots" - metaFilePath = "meta.json" - stateFilePath = "state.bin" - tmpSuffix = ".tmp" -) - -// FileSnapshotStore implements the SnapshotStore interface and allows -// snapshots to be made on the local disk. -type FileSnapshotStore struct { - path string - retain int - logger *log.Logger -} - -type snapMetaSlice []*fileSnapshotMeta - -// FileSnapshotSink implements SnapshotSink with a file. -type FileSnapshotSink struct { - store *FileSnapshotStore - logger *log.Logger - dir string - meta fileSnapshotMeta - - stateFile *os.File - stateHash hash.Hash64 - buffered *bufio.Writer - - closed bool -} - -// fileSnapshotMeta is stored on disk. We also put a CRC -// on disk so that we can verify the snapshot. -type fileSnapshotMeta struct { - SnapshotMeta - CRC []byte -} - -// bufferedFile is returned when we open a snapshot. This way -// reads are buffered and the file still gets closed. -type bufferedFile struct { - bh *bufio.Reader - fh *os.File -} - -func (b *bufferedFile) Read(p []byte) (n int, err error) { - return b.bh.Read(p) -} - -func (b *bufferedFile) Close() error { - return b.fh.Close() -} - -// NewFileSnapshotStore creates a new FileSnapshotStore based -// on a base directory. The `retain` parameter controls how many -// snapshots are retained. Must be at least 1. 
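A minimal sketch of constructing the store (the `raft-data` base directory is illustrative; the store creates the `snapshots/` subdirectory and runs its permissions test itself):

```go
package main

import (
	"log"
	"os"

	"github.com/hashicorp/raft"
)

func main() {
	// Retain the 3 most recent snapshots under raft-data/snapshots,
	// logging store activity to stderr.
	snaps, err := raft.NewFileSnapshotStore("raft-data", 3, os.Stderr)
	if err != nil {
		log.Fatalf("snapshot store: %v", err)
	}
	_ = snaps // typically handed to raft.NewRaft with the log and stable stores
}
```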
-func NewFileSnapshotStore(base string, retain int, logOutput io.Writer) (*FileSnapshotStore, error) { - if retain < 1 { - return nil, fmt.Errorf("must retain at least one snapshot") - } - if logOutput == nil { - logOutput = os.Stderr - } - - // Ensure our path exists - path := filepath.Join(base, snapPath) - if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { - return nil, fmt.Errorf("snapshot path not accessible: %v", err) - } - - // Setup the store - store := &FileSnapshotStore{ - path: path, - retain: retain, - logger: log.New(logOutput, "", log.LstdFlags), - } - - // Do a permissions test - if err := store.testPermissions(); err != nil { - return nil, fmt.Errorf("permissions test failed: %v", err) - } - return store, nil -} - -// testPermissions tries to touch a file in our path to see if it works. -func (f *FileSnapshotStore) testPermissions() error { - path := filepath.Join(f.path, testPath) - fh, err := os.Create(path) - if err != nil { - return err - } - fh.Close() - os.Remove(path) - return nil -} - -// snapshotName generates a name for the snapshot. -func snapshotName(term, index uint64) string { - now := time.Now() - msec := now.UnixNano() / int64(time.Millisecond) - return fmt.Sprintf("%d-%d-%d", term, index, msec) -} - -// Create is used to start a new snapshot -func (f *FileSnapshotStore) Create(index, term uint64, peers []byte) (SnapshotSink, error) { - // Create a new path - name := snapshotName(term, index) - path := filepath.Join(f.path, name+tmpSuffix) - f.logger.Printf("[INFO] snapshot: Creating new snapshot at %s", path) - - // Make the directory - if err := os.MkdirAll(path, 0755); err != nil { - f.logger.Printf("[ERR] snapshot: Failed to make snapshot directory: %v", err) - return nil, err - } - - // Create the sink - sink := &FileSnapshotSink{ - store: f, - logger: f.logger, - dir: path, - meta: fileSnapshotMeta{ - SnapshotMeta: SnapshotMeta{ - ID: name, - Index: index, - Term: term, - Peers: peers, - }, - CRC: nil, - }, - } - - // Write out the meta data - if err := sink.writeMeta(); err != nil { - f.logger.Printf("[ERR] snapshot: Failed to write metadata: %v", err) - return nil, err - } - - // Open the state file - statePath := filepath.Join(path, stateFilePath) - fh, err := os.Create(statePath) - if err != nil { - f.logger.Printf("[ERR] snapshot: Failed to create state file: %v", err) - return nil, err - } - sink.stateFile = fh - - // Create a CRC64 hash - sink.stateHash = crc64.New(crc64.MakeTable(crc64.ECMA)) - - // Wrap both the hash and file in a MultiWriter with buffering - multi := io.MultiWriter(sink.stateFile, sink.stateHash) - sink.buffered = bufio.NewWriter(multi) - - // Done - return sink, nil -} - -// List returns available snapshots in the store. -func (f *FileSnapshotStore) List() ([]*SnapshotMeta, error) { - // Get the eligible snapshots - snapshots, err := f.getSnapshots() - if err != nil { - f.logger.Printf("[ERR] snapshot: Failed to get snapshots: %v", err) - return nil, err - } - - var snapMeta []*SnapshotMeta - for _, meta := range snapshots { - snapMeta = append(snapMeta, &meta.SnapshotMeta) - if len(snapMeta) == f.retain { - break - } - } - return snapMeta, nil -} - -// getSnapshots returns all the known snapshots. 
-func (f *FileSnapshotStore) getSnapshots() ([]*fileSnapshotMeta, error) { - // Get the eligible snapshots - snapshots, err := ioutil.ReadDir(f.path) - if err != nil { - f.logger.Printf("[ERR] snapshot: Failed to scan snapshot dir: %v", err) - return nil, err - } - - // Populate the metadata - var snapMeta []*fileSnapshotMeta - for _, snap := range snapshots { - // Ignore any files - if !snap.IsDir() { - continue - } - - // Ignore any temporary snapshots - dirName := snap.Name() - if strings.HasSuffix(dirName, tmpSuffix) { - f.logger.Printf("[WARN] snapshot: Found temporary snapshot: %v", dirName) - continue - } - - // Try to read the meta data - meta, err := f.readMeta(dirName) - if err != nil { - f.logger.Printf("[WARN] snapshot: Failed to read metadata for %v: %v", dirName, err) - continue - } - - // Append, but only return up to the retain count - snapMeta = append(snapMeta, meta) - } - - // Sort the snapshot, reverse so we get new -> old - sort.Sort(sort.Reverse(snapMetaSlice(snapMeta))) - - return snapMeta, nil -} - -// readMeta is used to read the meta data for a given named backup -func (f *FileSnapshotStore) readMeta(name string) (*fileSnapshotMeta, error) { - // Open the meta file - metaPath := filepath.Join(f.path, name, metaFilePath) - fh, err := os.Open(metaPath) - if err != nil { - return nil, err - } - defer fh.Close() - - // Buffer the file IO - buffered := bufio.NewReader(fh) - - // Read in the JSON - meta := &fileSnapshotMeta{} - dec := json.NewDecoder(buffered) - if err := dec.Decode(meta); err != nil { - return nil, err - } - return meta, nil -} - -// Open takes a snapshot ID and returns a ReadCloser for that snapshot. -func (f *FileSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, error) { - // Get the metadata - meta, err := f.readMeta(id) - if err != nil { - f.logger.Printf("[ERR] snapshot: Failed to get meta data to open snapshot: %v", err) - return nil, nil, err - } - - // Open the state file - statePath := filepath.Join(f.path, id, stateFilePath) - fh, err := os.Open(statePath) - if err != nil { - f.logger.Printf("[ERR] snapshot: Failed to open state file: %v", err) - return nil, nil, err - } - - // Create a CRC64 hash - stateHash := crc64.New(crc64.MakeTable(crc64.ECMA)) - - // Compute the hash - _, err = io.Copy(stateHash, fh) - if err != nil { - f.logger.Printf("[ERR] snapshot: Failed to read state file: %v", err) - fh.Close() - return nil, nil, err - } - - // Verify the hash - computed := stateHash.Sum(nil) - if bytes.Compare(meta.CRC, computed) != 0 { - f.logger.Printf("[ERR] snapshot: CRC checksum failed (stored: %v computed: %v)", - meta.CRC, computed) - fh.Close() - return nil, nil, fmt.Errorf("CRC mismatch") - } - - // Seek to the start - if _, err := fh.Seek(0, 0); err != nil { - f.logger.Printf("[ERR] snapshot: State file seek failed: %v", err) - fh.Close() - return nil, nil, err - } - - // Return a buffered file - buffered := &bufferedFile{ - bh: bufio.NewReader(fh), - fh: fh, - } - - return &meta.SnapshotMeta, buffered, nil -} - -// ReapSnapshots reaps any snapshots beyond the retain count. 
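The integrity check that `Open` performs can be mirrored in isolation; a sketch, assuming `storedCRC` was taken from the snapshot's `meta.json`:

```go
package snapcheck

import (
	"bytes"
	"fmt"
	"hash/crc64"
	"io"
	"os"
)

// verifyState recomputes the CRC64 (ECMA) of a snapshot state file and
// compares it against the CRC recorded in the metadata, as Open does above.
func verifyState(statePath string, storedCRC []byte) error {
	fh, err := os.Open(statePath)
	if err != nil {
		return err
	}
	defer fh.Close()

	h := crc64.New(crc64.MakeTable(crc64.ECMA))
	if _, err := io.Copy(h, fh); err != nil {
		return err
	}
	if computed := h.Sum(nil); !bytes.Equal(storedCRC, computed) {
		return fmt.Errorf("CRC mismatch (stored: %x computed: %x)", storedCRC, computed)
	}
	return nil
}
```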
-func (f *FileSnapshotStore) ReapSnapshots() error { - snapshots, err := f.getSnapshots() - if err != nil { - f.logger.Printf("[ERR] snapshot: Failed to get snapshots: %v", err) - return err - } - - for i := f.retain; i < len(snapshots); i++ { - path := filepath.Join(f.path, snapshots[i].ID) - f.logger.Printf("[INFO] snapshot: reaping snapshot %v", path) - if err := os.RemoveAll(path); err != nil { - f.logger.Printf("[ERR] snapshot: Failed to reap snapshot %v: %v", path, err) - return err - } - } - return nil -} - -// ID returns the ID of the snapshot; it can be used with Open() -// after the snapshot is finalized. -func (s *FileSnapshotSink) ID() string { - return s.meta.ID -} - -// Write is used to append to the state file. We write to the -// buffered IO object to reduce the number of context switches. -func (s *FileSnapshotSink) Write(b []byte) (int, error) { - return s.buffered.Write(b) -} - -// Close is used to indicate a successful end. -func (s *FileSnapshotSink) Close() error { - // Make sure close is idempotent - if s.closed { - return nil - } - s.closed = true - - // Close the open handles - if err := s.finalize(); err != nil { - s.logger.Printf("[ERR] snapshot: Failed to finalize snapshot: %v", err) - return err - } - - // Write out the metadata - if err := s.writeMeta(); err != nil { - s.logger.Printf("[ERR] snapshot: Failed to write metadata: %v", err) - return err - } - - // Move the directory into place - newPath := strings.TrimSuffix(s.dir, tmpSuffix) - if err := os.Rename(s.dir, newPath); err != nil { - s.logger.Printf("[ERR] snapshot: Failed to move snapshot into place: %v", err) - return err - } - - // Reap any old snapshots - s.store.ReapSnapshots() - return nil -} - -// Cancel is used to indicate an unsuccessful end. -func (s *FileSnapshotSink) Cancel() error { - // Make sure cancel is idempotent - if s.closed { - return nil - } - s.closed = true - - // Close the open handles - if err := s.finalize(); err != nil { - s.logger.Printf("[ERR] snapshot: Failed to finalize snapshot: %v", err) - return err - } - - // Attempt to remove all artifacts - return os.RemoveAll(s.dir) -} - -// finalize is used to close all of our resources. -func (s *FileSnapshotSink) finalize() error { - // Flush any remaining data - if err := s.buffered.Flush(); err != nil { - return err - } - - // Get the file size - stat, statErr := s.stateFile.Stat() - - // Close the file - if err := s.stateFile.Close(); err != nil { - return err - } - - // Set the file size, check after we close - if statErr != nil { - return statErr - } - s.meta.Size = stat.Size() - - // Set the CRC - s.meta.CRC = s.stateHash.Sum(nil) - return nil -} - -// writeMeta is used to write out the metadata we have. -func (s *FileSnapshotSink) writeMeta() error { - // Open the meta file - metaPath := filepath.Join(s.dir, metaFilePath) - fh, err := os.Create(metaPath) - if err != nil { - return err - } - defer fh.Close() - - // Buffer the file IO - buffered := bufio.NewWriter(fh) - defer buffered.Flush() - - // Write out as JSON - enc := json.NewEncoder(buffered) - if err := enc.Encode(&s.meta); err != nil { - return err - } - return nil -} - -// Implement the sort interface for []*fileSnapshotMeta.
-func (s snapMetaSlice) Len() int { - return len(s) -} - -func (s snapMetaSlice) Less(i, j int) bool { - if s[i].Term != s[j].Term { - return s[i].Term < s[j].Term - } - if s[i].Index != s[j].Index { - return s[i].Index < s[j].Index - } - return s[i].ID < s[j].ID -} - -func (s snapMetaSlice) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/file_snapshot_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft/file_snapshot_test.go deleted file mode 100644 index 7620c1938..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/file_snapshot_test.go +++ /dev/null @@ -1,343 +0,0 @@ -package raft - -import ( - "bytes" - "io" - "io/ioutil" - "os" - "runtime" - "testing" -) - -func FileSnapTest(t *testing.T) (string, *FileSnapshotStore) { - // Create a test dir - dir, err := ioutil.TempDir("", "raft") - if err != nil { - t.Fatalf("err: %v ", err) - } - - snap, err := NewFileSnapshotStore(dir, 3, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - return dir, snap -} - -func TestFileSnapshotStoreImpl(t *testing.T) { - var impl interface{} = &FileSnapshotStore{} - if _, ok := impl.(SnapshotStore); !ok { - t.Fatalf("FileSnapshotStore not a SnapshotStore") - } -} - -func TestFileSnapshotSinkImpl(t *testing.T) { - var impl interface{} = &FileSnapshotSink{} - if _, ok := impl.(SnapshotSink); !ok { - t.Fatalf("FileSnapshotSink not a SnapshotSink") - } -} - -func TestFileSS_CreateSnapshotMissingParentDir(t *testing.T) { - parent, err := ioutil.TempDir("", "raft") - if err != nil { - t.Fatalf("err: %v ", err) - } - defer os.RemoveAll(parent) - - dir, err := ioutil.TempDir(parent, "raft") - if err != nil { - t.Fatalf("err: %v ", err) - } - - snap, err := NewFileSnapshotStore(dir, 3, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - os.RemoveAll(parent) - peers := []byte("all my lovely friends") - _, err = snap.Create(10, 3, peers) - if err != nil { - t.Fatalf("should not fail when using non-existent parent") - } - -} - -func TestFileSS_CreateSnapshot(t *testing.T) { - // Create a test dir - dir, err := ioutil.TempDir("", "raft") - if err != nil { - t.Fatalf("err: %v ", err) - } - defer os.RemoveAll(dir) - - snap, err := NewFileSnapshotStore(dir, 3, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Check no snapshots - snaps, err := snap.List() - if err != nil { - t.Fatalf("err: %v", err) - } - if len(snaps) != 0 { - t.Fatalf("did not expect any snapshots: %v", snaps) - } - - // Create a new sink - peers := []byte("all my lovely friends") - sink, err := snap.Create(10, 3, peers) - if err != nil { - t.Fatalf("err: %v", err) - } - - // The sink is not done, should not be in a list! - snaps, err = snap.List() - if err != nil { - t.Fatalf("err: %v", err) - } - if len(snaps) != 0 { - t.Fatalf("did not expect any snapshots: %v", snaps) - } - - // Write to the sink - _, err = sink.Write([]byte("first\n")) - if err != nil { - t.Fatalf("err: %v", err) - } - _, err = sink.Write([]byte("second\n")) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Done! - err = sink.Close() - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should have a snapshot!
- snaps, err = snap.List() - if err != nil { - t.Fatalf("err: %v", err) - } - if len(snaps) != 1 { - t.Fatalf("expected a snapshot: %v", snaps) - } - - // Check the latest - latest := snaps[0] - if latest.Index != 10 { - t.Fatalf("bad snapshot: %v", *latest) - } - if latest.Term != 3 { - t.Fatalf("bad snapshot: %v", *latest) - } - if bytes.Compare(latest.Peers, peers) != 0 { - t.Fatalf("bad snapshot: %v", *latest) - } - if latest.Size != 13 { - t.Fatalf("bad snapshot: %v", *latest) - } - - // Read the snapshot - _, r, err := snap.Open(latest.ID) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Read out everything - var buf bytes.Buffer - if _, err := io.Copy(&buf, r); err != nil { - t.Fatalf("err: %v", err) - } - if err := r.Close(); err != nil { - t.Fatalf("err: %v", err) - } - - // Ensure a match - if bytes.Compare(buf.Bytes(), []byte("first\nsecond\n")) != 0 { - t.Fatalf("content mismatch") - } -} - -func TestFileSS_CancelSnapshot(t *testing.T) { - // Create a test dir - dir, err := ioutil.TempDir("", "raft") - if err != nil { - t.Fatalf("err: %v ", err) - } - defer os.RemoveAll(dir) - - snap, err := NewFileSnapshotStore(dir, 3, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Create a new sink - peers := []byte("all my lovely friends") - sink, err := snap.Create(10, 3, peers) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Cancel the snapshot! Should delete - err = sink.Cancel() - if err != nil { - t.Fatalf("err: %v", err) - } - - // The sink is canceled, should not be in a list! - snaps, err := snap.List() - if err != nil { - t.Fatalf("err: %v", err) - } - if len(snaps) != 0 { - t.Fatalf("did not expect any snapshots: %v", snaps) - } -} - -func TestFileSS_Retention(t *testing.T) { - // Create a test dir - dir, err := ioutil.TempDir("", "raft") - if err != nil { - t.Fatalf("err: %v ", err) - } - defer os.RemoveAll(dir) - - snap, err := NewFileSnapshotStore(dir, 2, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Create a new sink - peers := []byte("all my lovely friends") - - // Create a few snapshots - for i := 10; i < 15; i++ { - sink, err := snap.Create(uint64(i), 3, peers) - if err != nil { - t.Fatalf("err: %v", err) - } - err = sink.Close() - if err != nil { - t.Fatalf("err: %v", err) - } - } - - // Should only have 2 listed!
- snaps, err := snap.List() - if err != nil { - t.Fatalf("err: %v", err) - } - if len(snaps) != 2 { - t.Fatalf("expected 2 snapshots: %v", snaps) - } - - // Check they are the latest - if snaps[0].Index != 14 { - t.Fatalf("bad snap: %#v", *snaps[0]) - } - if snaps[1].Index != 13 { - t.Fatalf("bad snap: %#v", *snaps[1]) - } -} - -func TestFileSS_BadPerm(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("skipping file permission test on windows") - } - - // Create a temp dir - dir1, err := ioutil.TempDir("", "raft") - if err != nil { - t.Fatalf("err: %s", err) - } - defer os.RemoveAll(dir1) - - // Create a sub dir and remove all permissions - dir2, err := ioutil.TempDir(dir1, "badperm") - if err != nil { - t.Fatalf("err: %s", err) - } - if err := os.Chmod(dir2, 000); err != nil { - t.Fatalf("err: %s", err) - } - defer os.Chmod(dir2, 0777) // Set perms back for delete - - // Should fail - if _, err := NewFileSnapshotStore(dir2, 3, nil); err == nil { - t.Fatalf("should fail to use dir with bad perms") - } -} - -func TestFileSS_MissingParentDir(t *testing.T) { - parent, err := ioutil.TempDir("", "raft") - if err != nil { - t.Fatalf("err: %v ", err) - } - defer os.RemoveAll(parent) - - dir, err := ioutil.TempDir(parent, "raft") - if err != nil { - t.Fatalf("err: %v ", err) - } - - os.RemoveAll(parent) - _, err = NewFileSnapshotStore(dir, 3, nil) - if err != nil { - t.Fatalf("should not fail when using non-existent parent") - } -} - -func TestFileSS_Ordering(t *testing.T) { - // Create a test dir - dir, err := ioutil.TempDir("", "raft") - if err != nil { - t.Fatalf("err: %v ", err) - } - defer os.RemoveAll(dir) - - snap, err := NewFileSnapshotStore(dir, 3, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Create a new sink - peers := []byte("all my lovely friends") - - sink, err := snap.Create(130350, 5, peers) - if err != nil { - t.Fatalf("err: %v", err) - } - err = sink.Close() - if err != nil { - t.Fatalf("err: %v", err) - } - - sink, err = snap.Create(204917, 36, peers) - if err != nil { - t.Fatalf("err: %v", err) - } - err = sink.Close() - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should only have 2 listed! - snaps, err := snap.List() - if err != nil { - t.Fatalf("err: %v", err) - } - if len(snaps) != 2 { - t.Fatalf("expected 2 snapshots: %v", snaps) - } - - // Check they are ordered - if snaps[0].Term != 36 { - t.Fatalf("bad snap: %#v", *snaps[0]) - } - if snaps[1].Term != 5 { - t.Fatalf("bad snap: %#v", *snaps[1]) - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/fsm.go b/Godeps/_workspace/src/github.com/hashicorp/raft/fsm.go deleted file mode 100644 index ea8ab548d..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/fsm.go +++ /dev/null @@ -1,37 +0,0 @@ -package raft - -import ( - "io" -) - -// FSM provides an interface that can be implemented by -// clients to make use of the replicated log. -type FSM interface { - // Apply is invoked once a log entry is committed. - Apply(*Log) interface{} - - // Snapshot is used to support log compaction. This call should - // return an FSMSnapshot which can be used to save a point-in-time - // snapshot of the FSM. Apply and Snapshot are not called in multiple - // threads, but Apply will be called concurrently with Persist. This means - // the FSM should be implemented in a fashion that allows for concurrent - // updates while a snapshot is happening. - Snapshot() (FSMSnapshot, error) - - // Restore is used to restore an FSM from a snapshot.
It is not called - // concurrently with any other command. The FSM must discard all previous - // state. - Restore(io.ReadCloser) error -} - -// FSMSnapshot is returned by an FSM in response to a Snapshot call. -// It must be safe to invoke FSMSnapshot methods with concurrent -// calls to Apply. -type FSMSnapshot interface { - // Persist should dump all necessary state to the WriteCloser 'sink', - // and call sink.Close() when finished or call sink.Cancel() on error. - Persist(sink SnapshotSink) error - - // Release is invoked when we are finished with the snapshot. - Release() -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/future.go b/Godeps/_workspace/src/github.com/hashicorp/raft/future.go deleted file mode 100644 index 854e1ac92..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/future.go +++ /dev/null @@ -1,182 +0,0 @@ -package raft - -import ( - "sync" - "time" -) - -// Future is used to represent an action that may occur in the future. -type Future interface { - Error() error -} - -// ApplyFuture is used for Apply() and can return the FSM response. -type ApplyFuture interface { - Future - Response() interface{} - Index() uint64 -} - -// errorFuture is used to return a static error. -type errorFuture struct { - err error -} - -func (e errorFuture) Error() error { - return e.err -} - -func (e errorFuture) Response() interface{} { - return nil -} - -func (e errorFuture) Index() uint64 { - return 0 -} - -// deferError can be embedded to allow a future -// to provide an error in the future. -type deferError struct { - err error - errCh chan error - responded bool -} - -func (d *deferError) init() { - d.errCh = make(chan error, 1) -} - -func (d *deferError) Error() error { - if d.err != nil { - return d.err - } - if d.errCh == nil { - panic("waiting for response on nil channel") - } - d.err = <-d.errCh - return d.err -} - -func (d *deferError) respond(err error) { - if d.errCh == nil { - return - } - if d.responded { - return - } - d.errCh <- err - close(d.errCh) - d.responded = true -} - -// logFuture is used to apply a log entry and wait until -// the log is considered committed. -type logFuture struct { - deferError - log Log - policy quorumPolicy - response interface{} - dispatch time.Time -} - -func (l *logFuture) Response() interface{} { - return l.response -} - -func (l *logFuture) Index() uint64 { - return l.log.Index -} - -type peerFuture struct { - deferError - peers []string -} - -type shutdownFuture struct { - raft *Raft -} - -func (s *shutdownFuture) Error() error { - for s.raft.getRoutines() > 0 { - time.Sleep(5 * time.Millisecond) - } - return nil -} - -// snapshotFuture is used for waiting on a snapshot to complete. -type snapshotFuture struct { - deferError -} - -// reqSnapshotFuture is used for requesting a snapshot start. -// It is only used internally. -type reqSnapshotFuture struct { - deferError - - // snapshot details provided by the FSM runner before responding - index uint64 - term uint64 - peers []string - snapshot FSMSnapshot -} - -// restoreFuture is used for requesting an FSM to perform a -// snapshot restore. Used internally only. -type restoreFuture struct { - deferError - ID string -} - -// verifyFuture is used to verify the current node is still -// the leader. This is to prevent a stale read. -type verifyFuture struct { - deferError - notifyCh chan *verifyFuture - quorumSize int - votes int - voteLock sync.Mutex -} - -// vote is used to respond to a verifyFuture. -// This may block when responding on the notifyCh.
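Everything in the deleted future.go turns on the deferError pattern above: a one-element buffered error channel that is written at most once and read lazily by Error(). As a standalone sketch of the same one-shot future idea (the OneShot name is illustrative, not part of the vendored code; the deleted vote implementation resumes right after):

package main

import (
	"errors"
	"fmt"
)

// OneShot mirrors deferError: resolve at most once, wait from anywhere.
type OneShot struct {
	err       error
	errCh     chan error
	responded bool
}

func NewOneShot() *OneShot {
	// Buffer of one so Respond never blocks on a slow waiter.
	return &OneShot{errCh: make(chan error, 1)}
}

// Respond resolves the future; later calls are ignored. Like the original,
// this is not goroutine-safe on its own and assumes a single responder.
func (o *OneShot) Respond(err error) {
	if o.responded {
		return
	}
	o.errCh <- err
	close(o.errCh)
	o.responded = true
}

// Error blocks until the future is resolved, then caches the result.
func (o *OneShot) Error() error {
	if o.err == nil {
		o.err = <-o.errCh
	}
	return o.err
}

func main() {
	f := NewOneShot()
	go f.Respond(errors.New("leadership lost"))
	fmt.Println(f.Error()) // leadership lost
}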
-func (v *verifyFuture) vote(leader bool) { - v.voteLock.Lock() - defer v.voteLock.Unlock() - - // Guard against having notified already - if v.notifyCh == nil { - return - } - - if leader { - v.votes++ - if v.votes >= v.quorumSize { - v.notifyCh <- v - v.notifyCh = nil - } - } else { - v.notifyCh <- v - v.notifyCh = nil - } -} - -// appendFuture is used for waiting on a pipelined append -// entries RPC. -type appendFuture struct { - deferError - start time.Time - args *AppendEntriesRequest - resp *AppendEntriesResponse -} - -func (a *appendFuture) Start() time.Time { - return a.start -} - -func (a *appendFuture) Request() *AppendEntriesRequest { - return a.args -} - -func (a *appendFuture) Response() *AppendEntriesResponse { - return a.resp -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/inflight.go b/Godeps/_workspace/src/github.com/hashicorp/raft/inflight.go deleted file mode 100644 index 7014ff503..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/inflight.go +++ /dev/null @@ -1,213 +0,0 @@ -package raft - -import ( - "container/list" - "sync" -) - -// QuorumPolicy allows individual logFutures to have different -// commitment rules while still using the inflight mechanism. -type quorumPolicy interface { - // Checks if a commit from a given peer is enough to - // satisfy the commitment rules - Commit() bool - - // Checks if a commit is committed - IsCommitted() bool -} - -// MajorityQuorum is used by Apply transactions and requires -// a simple majority of nodes. -type majorityQuorum struct { - count int - votesNeeded int -} - -func newMajorityQuorum(clusterSize int) *majorityQuorum { - votesNeeded := (clusterSize / 2) + 1 - return &majorityQuorum{count: 0, votesNeeded: votesNeeded} -} - -func (m *majorityQuorum) Commit() bool { - m.count++ - return m.count >= m.votesNeeded -} - -func (m *majorityQuorum) IsCommitted() bool { - return m.count >= m.votesNeeded -} - -// Inflight is used to track operations that are still in-flight. -type inflight struct { - sync.Mutex - committed *list.List - commitCh chan struct{} - minCommit uint64 - maxCommit uint64 - operations map[uint64]*logFuture - stopCh chan struct{} -} - -// NewInflight returns an inflight struct that notifies -// the provided channel when logs are finished committing. -func newInflight(commitCh chan struct{}) *inflight { - return &inflight{ - committed: list.New(), - commitCh: commitCh, - minCommit: 0, - maxCommit: 0, - operations: make(map[uint64]*logFuture), - stopCh: make(chan struct{}), - } -} - -// Start is used to mark a logFuture as being inflight. It -// also commits the entry, as it is assumed the leader is -// starting. -func (i *inflight) Start(l *logFuture) { - i.Lock() - defer i.Unlock() - i.start(l) -} - -// StartAll is used to mark a list of logFuture's as being -// inflight. It also commits each entry as the leader is -// assumed to be starting. -func (i *inflight) StartAll(logs []*logFuture) { - i.Lock() - defer i.Unlock() - for _, l := range logs { - i.start(l) - } -} - -// start is used to mark a single entry as inflight, -// must be invoked with the lock held. -func (i *inflight) start(l *logFuture) { - idx := l.log.Index - i.operations[idx] = l - - if idx > i.maxCommit { - i.maxCommit = idx - } - if i.minCommit == 0 { - i.minCommit = idx - } - i.commit(idx) -} - -// Cancel is used to cancel all in-flight operations. -// This is done when the leader steps down, and all futures -// are sent the given error. 
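Before the deleted Cancel implementation, one aside on newMajorityQuorum above: integer division makes votesNeeded = N/2 + 1, which is why an even-sized cluster needs as many votes as the next odd size up while tolerating no additional failures. A tiny standalone table of that arithmetic:

package main

import "fmt"

// votesNeeded reproduces the quorum rule from newMajorityQuorum.
func votesNeeded(clusterSize int) int {
	return clusterSize/2 + 1
}

func main() {
	for _, n := range []int{1, 2, 3, 4, 5, 7} {
		fmt.Printf("cluster=%d quorum=%d tolerated failures=%d\n",
			n, votesNeeded(n), n-votesNeeded(n))
	}
	// Note cluster=4 needs quorum=3 and tolerates 1 failure, same as cluster=3.
}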
-func (i *inflight) Cancel(err error) { - // Close the channel first to unblock any pending commits - close(i.stopCh) - - // Lock after close to avoid deadlock - i.Lock() - defer i.Unlock() - - // Respond to all inflight operations - for _, op := range i.operations { - op.respond(err) - } - - // Clear all the committed but not yet processed entries - for e := i.committed.Front(); e != nil; e = e.Next() { - e.Value.(*logFuture).respond(err) - } - - // Clear the map - i.operations = make(map[uint64]*logFuture) - - // Clear the list of committed - i.committed = list.New() - - // Close the commitCh - close(i.commitCh) - - // Reset indexes - i.minCommit = 0 - i.maxCommit = 0 -} - -// Committed returns all the committed operations in order. -func (i *inflight) Committed() (l *list.List) { - i.Lock() - l, i.committed = i.committed, list.New() - i.Unlock() - return l -} - -// Commit is used by leader replication routines to indicate that -// a follower has finished committing a log to disk. -func (i *inflight) Commit(index uint64) { - i.Lock() - defer i.Unlock() - i.commit(index) -} - -// CommitRange is used to commit a range of indexes inclusively. -// It is optimized to avoid commits for indexes that are not tracked. -func (i *inflight) CommitRange(minIndex, maxIndex uint64) { - i.Lock() - defer i.Unlock() - - // Update the minimum index - minIndex = max(i.minCommit, minIndex) - - // Commit each index - for idx := minIndex; idx <= maxIndex; idx++ { - i.commit(idx) - } -} - -// commit is used to commit a single index. Must be called with the lock held. -func (i *inflight) commit(index uint64) { - op, ok := i.operations[index] - if !ok { - // Ignore if not in the map, as it may be committed already - return - } - - // Check if we've satisfied the commit - if !op.policy.Commit() { - return - } - - // Cannot commit if this is not the minimum inflight. This can happen - // if the quorum size changes, meaning a previous commit requires a larger - // quorum than this commit. We MUST block until the previous log is committed, - // otherwise logs will be applied out of order.
- if index != i.minCommit { - return - } - -NOTIFY: - // Add the operation to the committed list - i.committed.PushBack(op) - - // Stop tracking since it is committed - delete(i.operations, index) - - // Update the indexes - if index == i.maxCommit { - i.minCommit = 0 - i.maxCommit = 0 - - } else { - i.minCommit++ - } - - // Check if the next in-flight operation is ready - if i.minCommit != 0 { - op = i.operations[i.minCommit] - if op.policy.IsCommitted() { - index = i.minCommit - goto NOTIFY - } - } - - // Async notify of ready operations - asyncNotifyCh(i.commitCh) -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/inflight_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft/inflight_test.go deleted file mode 100644 index a9f57d6ea..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/inflight_test.go +++ /dev/null @@ -1,150 +0,0 @@ -package raft - -import ( - "fmt" - "testing" -) - -func TestInflight_StartCommit(t *testing.T) { - commitCh := make(chan struct{}, 1) - in := newInflight(commitCh) - - // Commit a transaction as being in flight - l := &logFuture{log: Log{Index: 1}} - l.policy = newMajorityQuorum(5) - in.Start(l) - - // Commit 3 times - in.Commit(1) - if in.Committed().Len() != 0 { - t.Fatalf("should not be committed") - } - - in.Commit(1) - if in.Committed().Len() != 1 { - t.Fatalf("should be committed") - } - - // Already committed but should work anyway - in.Commit(1) -} - -func TestInflight_Cancel(t *testing.T) { - commitCh := make(chan struct{}, 1) - in := newInflight(commitCh) - - // Commit a transaction as being in flight - l := &logFuture{ - log: Log{Index: 1}, - } - l.init() - l.policy = newMajorityQuorum(3) - in.Start(l) - - // Cancel with an error - err := fmt.Errorf("error 1") - in.Cancel(err) - - // Should get an error return - if l.Error() != err { - t.Fatalf("expected error") - } -} - -func TestInflight_StartAll(t *testing.T) { - commitCh := make(chan struct{}, 1) - in := newInflight(commitCh) - - // Commit a few transactions as being in flight - l1 := &logFuture{log: Log{Index: 2}} - l1.policy = newMajorityQuorum(5) - l2 := &logFuture{log: Log{Index: 3}} - l2.policy = newMajorityQuorum(5) - l3 := &logFuture{log: Log{Index: 4}} - l3.policy = newMajorityQuorum(5) - - // Start all the entries - in.StartAll([]*logFuture{l1, l2, l3}) - - // Commit ranges - in.CommitRange(1, 5) - in.CommitRange(1, 4) - in.CommitRange(1, 10) - - // Should get 3 back - if in.Committed().Len() != 3 { - t.Fatalf("expected all 3 to commit") - } -} - -func TestInflight_CommitRange(t *testing.T) { - commitCh := make(chan struct{}, 1) - in := newInflight(commitCh) - - // Commit a few transactions as being in flight - l1 := &logFuture{log: Log{Index: 2}} - l1.policy = newMajorityQuorum(5) - in.Start(l1) - - l2 := &logFuture{log: Log{Index: 3}} - l2.policy = newMajorityQuorum(5) - in.Start(l2) - - l3 := &logFuture{log: Log{Index: 4}} - l3.policy = newMajorityQuorum(5) - in.Start(l3) - - // Commit ranges - in.CommitRange(1, 5) - in.CommitRange(1, 4) - in.CommitRange(1, 10) - - // Should get 3 back - if in.Committed().Len() != 3 { - t.Fatalf("expected all 3 to commit") - } -} - -// Should panic if we commit non-contiguously!
-func TestInflight_NonContiguous(t *testing.T) { - commitCh := make(chan struct{}, 1) - in := newInflight(commitCh) - - // Commit a few transactions as being in flight - l1 := &logFuture{log: Log{Index: 2}} - l1.policy = newMajorityQuorum(5) - in.Start(l1) - - l2 := &logFuture{log: Log{Index: 3}} - l2.policy = newMajorityQuorum(5) - in.Start(l2) - - in.Commit(3) - in.Commit(3) - in.Commit(3) // panic! - - if in.Committed().Len() != 0 { - t.Fatalf("should not commit") - } - - in.Commit(2) - in.Commit(2) - in.Commit(2) // panic! - - committed := in.Committed() - if committed.Len() != 2 { - t.Fatalf("should commit both") - } - - current := committed.Front() - l := current.Value.(*logFuture) - if l.log.Index != 2 { - t.Fatalf("bad: %v", *l) - } - - current = current.Next() - l = current.Value.(*logFuture) - if l.log.Index != 3 { - t.Fatalf("bad: %v", *l) - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_store.go b/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_store.go deleted file mode 100644 index 6e4dfd020..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_store.go +++ /dev/null @@ -1,116 +0,0 @@ -package raft - -import ( - "sync" -) - -// InmemStore implements the LogStore and StableStore interfaces. -// It should NOT EVER be used for production. It is used only for -// unit tests. Use the MDBStore implementation instead. -type InmemStore struct { - l sync.RWMutex - lowIndex uint64 - highIndex uint64 - logs map[uint64]*Log - kv map[string][]byte - kvInt map[string]uint64 -} - -// NewInmemStore returns a new in-memory backend. Do not ever -// use for production. Only for testing. -func NewInmemStore() *InmemStore { - i := &InmemStore{ - logs: make(map[uint64]*Log), - kv: make(map[string][]byte), - kvInt: make(map[string]uint64), - } - return i -} - -// FirstIndex implements the LogStore interface. -func (i *InmemStore) FirstIndex() (uint64, error) { - i.l.RLock() - defer i.l.RUnlock() - return i.lowIndex, nil -} - -// LastIndex implements the LogStore interface. -func (i *InmemStore) LastIndex() (uint64, error) { - i.l.RLock() - defer i.l.RUnlock() - return i.highIndex, nil -} - -// GetLog implements the LogStore interface. -func (i *InmemStore) GetLog(index uint64, log *Log) error { - i.l.RLock() - defer i.l.RUnlock() - l, ok := i.logs[index] - if !ok { - return ErrLogNotFound - } - *log = *l - return nil -} - -// StoreLog implements the LogStore interface. -func (i *InmemStore) StoreLog(log *Log) error { - return i.StoreLogs([]*Log{log}) } - -// StoreLogs implements the LogStore interface. -func (i *InmemStore) StoreLogs(logs []*Log) error { - i.l.Lock() - defer i.l.Unlock() - for _, l := range logs { - i.logs[l.Index] = l - if i.lowIndex == 0 { - i.lowIndex = l.Index - } - if l.Index > i.highIndex { - i.highIndex = l.Index - } - } - return nil -} - -// DeleteRange implements the LogStore interface. -func (i *InmemStore) DeleteRange(min, max uint64) error { - i.l.Lock() - defer i.l.Unlock() - for j := min; j <= max; j++ { - delete(i.logs, j) - } - i.lowIndex = max + 1 - return nil -} - -// Set implements the StableStore interface. -func (i *InmemStore) Set(key []byte, val []byte) error { - i.l.Lock() - defer i.l.Unlock() - i.kv[string(key)] = val - return nil -} - -// Get implements the StableStore interface. -func (i *InmemStore) Get(key []byte) ([]byte, error) { - i.l.RLock() - defer i.l.RUnlock() - return i.kv[string(key)], nil -} - -// SetUint64 implements the StableStore interface.
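The deleted SetUint64/GetUint64 pair follows. For context, the StableStore half of this type is how raft persists small durable counters (such as its current term and last vote) across restarts. A toy sketch of that usage; the stable type and the "CurrentTerm" key below are illustrative assumptions, not taken from the diff:

package main

import "fmt"

// stable is a map-backed stand-in with the same shape as the
// SetUint64/GetUint64 half of the StableStore interface.
type stable struct{ kvInt map[string]uint64 }

func (s *stable) SetUint64(key []byte, v uint64) { s.kvInt[string(key)] = v }
func (s *stable) GetUint64(key []byte) uint64    { return s.kvInt[string(key)] }

func main() {
	s := &stable{kvInt: map[string]uint64{}}
	s.SetUint64([]byte("CurrentTerm"), 3)           // hypothetical key name
	fmt.Println(s.GetUint64([]byte("CurrentTerm"))) // 3
}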
-func (i *InmemStore) SetUint64(key []byte, val uint64) error { - i.l.Lock() - defer i.l.Unlock() - i.kvInt[string(key)] = val - return nil -} - -// GetUint64 implements the StableStore interface. -func (i *InmemStore) GetUint64(key []byte) (uint64, error) { - i.l.RLock() - defer i.l.RUnlock() - return i.kvInt[string(key)], nil -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_transport.go b/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_transport.go deleted file mode 100644 index 994d06d8f..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_transport.go +++ /dev/null @@ -1,315 +0,0 @@ -package raft - -import ( - "fmt" - "io" - "sync" - "time" -) - -// NewInmemAddr returns a new in-memory addr with -// a randomly generated UUID as the ID. -func NewInmemAddr() string { - return generateUUID() -} - -// inmemPipeline is used to pipeline requests for the in-mem transport. -type inmemPipeline struct { - trans *InmemTransport - peer *InmemTransport - peerAddr string - - doneCh chan AppendFuture - inprogressCh chan *inmemPipelineInflight - - shutdown bool - shutdownCh chan struct{} - shutdownLock sync.Mutex -} - -type inmemPipelineInflight struct { - future *appendFuture - respCh <-chan RPCResponse -} - -// InmemTransport implements the Transport interface, to allow Raft to be -// tested in-memory without going over a network. -type InmemTransport struct { - sync.RWMutex - consumerCh chan RPC - localAddr string - peers map[string]*InmemTransport - pipelines []*inmemPipeline - timeout time.Duration -} - -// NewInmemTransport is used to initialize a new transport -// and generate a random local address. -func NewInmemTransport() (string, *InmemTransport) { - addr := NewInmemAddr() - trans := &InmemTransport{ - consumerCh: make(chan RPC, 16), - localAddr: addr, - peers: make(map[string]*InmemTransport), - timeout: 50 * time.Millisecond, - } - return addr, trans -} - -// SetHeartbeatHandler is used to set optional fast-path for -// heartbeats, not supported for this transport. -func (i *InmemTransport) SetHeartbeatHandler(cb func(RPC)) { -} - -// Consumer implements the Transport interface. -func (i *InmemTransport) Consumer() <-chan RPC { - return i.consumerCh -} - -// LocalAddr implements the Transport interface. -func (i *InmemTransport) LocalAddr() string { - return i.localAddr -} - -// AppendEntriesPipeline returns an interface that can be used to pipeline -// AppendEntries requests. -func (i *InmemTransport) AppendEntriesPipeline(target string) (AppendPipeline, error) { - i.RLock() - peer, ok := i.peers[target] - i.RUnlock() - if !ok { - return nil, fmt.Errorf("failed to connect to peer: %v", target) - } - pipeline := newInmemPipeline(i, peer, target) - i.Lock() - i.pipelines = append(i.pipelines, pipeline) - i.Unlock() - return pipeline, nil -} - -// AppendEntries implements the Transport interface. -func (i *InmemTransport) AppendEntries(target string, args *AppendEntriesRequest, resp *AppendEntriesResponse) error { - rpcResp, err := i.makeRPC(target, args, nil, i.timeout) - if err != nil { - return err - } - - // Copy the result back - out := rpcResp.Response.(*AppendEntriesResponse) - *resp = *out - return nil -} - -// RequestVote implements the Transport interface.
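The remaining Transport methods follow the same shape as AppendEntries above: build an RPC value carrying its own response channel, push it onto the peer's consumer channel, then select on the response with a timeout. That pattern, condensed into a runnable sketch (all names illustrative):

package main

import (
	"fmt"
	"time"
)

// rpc carries a command and the channel its reply must come back on.
type rpc struct {
	command  string
	respChan chan string
}

func main() {
	consumer := make(chan rpc, 16)

	// "Server" side: consume requests and respond in-process.
	go func() {
		for r := range consumer {
			r.respChan <- "ack: " + r.command
		}
	}()

	// "Client" side, mirroring makeRPC: send, then select with a timeout.
	respCh := make(chan string, 1)
	consumer <- rpc{command: "AppendEntries", respChan: respCh}
	select {
	case resp := <-respCh:
		fmt.Println(resp) // ack: AppendEntries
	case <-time.After(50 * time.Millisecond):
		fmt.Println("command timed out")
	}
}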
-func (i *InmemTransport) RequestVote(target string, args *RequestVoteRequest, resp *RequestVoteResponse) error { - rpcResp, err := i.makeRPC(target, args, nil, i.timeout) - if err != nil { - return err - } - - // Copy the result back - out := rpcResp.Response.(*RequestVoteResponse) - *resp = *out - return nil -} - -// InstallSnapshot implements the Transport interface. -func (i *InmemTransport) InstallSnapshot(target string, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error { - rpcResp, err := i.makeRPC(target, args, data, 10*i.timeout) - if err != nil { - return err - } - - // Copy the result back - out := rpcResp.Response.(*InstallSnapshotResponse) - *resp = *out - return nil -} - -func (i *InmemTransport) makeRPC(target string, args interface{}, r io.Reader, timeout time.Duration) (rpcResp RPCResponse, err error) { - i.RLock() - peer, ok := i.peers[target] - i.RUnlock() - - if !ok { - err = fmt.Errorf("failed to connect to peer: %v", target) - return - } - - // Send the RPC over - respCh := make(chan RPCResponse) - peer.consumerCh <- RPC{ - Command: args, - Reader: r, - RespChan: respCh, - } - - // Wait for a response - select { - case rpcResp = <-respCh: - if rpcResp.Error != nil { - err = rpcResp.Error - } - case <-time.After(timeout): - err = fmt.Errorf("command timed out") - } - return -} - -// EncodePeer implements the Transport interface. It uses the UUID as the -// address directly. -func (i *InmemTransport) EncodePeer(p string) []byte { - return []byte(p) -} - -// DecodePeer implements the Transport interface. It returns the UUID as the -// address directly. -func (i *InmemTransport) DecodePeer(buf []byte) string { - return string(buf) -} - -// Connect is used to connect this transport to another transport for -// a given peer name. This allows for local routing. -func (i *InmemTransport) Connect(peer string, trans *InmemTransport) { - i.Lock() - defer i.Unlock() - i.peers[peer] = trans -} - -// Disconnect is used to remove the ability to route to a given peer. -func (i *InmemTransport) Disconnect(peer string) { - i.Lock() - defer i.Unlock() - delete(i.peers, peer) - - // Disconnect any pipelines - n := len(i.pipelines) - for idx := 0; idx < n; idx++ { - if i.pipelines[idx].peerAddr == peer { - i.pipelines[idx].Close() - i.pipelines[idx], i.pipelines[n-1] = i.pipelines[n-1], nil - idx-- - n-- - } - } - i.pipelines = i.pipelines[:n] -} - -// DisconnectAll is used to remove all routes to peers.
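DisconnectAll follows. Worth noting from Disconnect above: matching pipelines are removed with an unordered swap-with-last, so the slice is filtered in place without allocating. The same trick in isolation (illustrative sketch with strings in place of pipelines):

package main

import "fmt"

// removeMatching filters xs in place; element order is not preserved.
func removeMatching(xs []string, bad string) []string {
	n := len(xs)
	for i := 0; i < n; i++ {
		if xs[i] == bad {
			xs[i], xs[n-1] = xs[n-1], "" // move the tail into the hole, clear the tail slot
			i--                          // re-examine the moved-in element
			n--
		}
	}
	return xs[:n]
}

func main() {
	fmt.Println(removeMatching([]string{"a", "b", "a", "c"}, "a")) // [c b]
}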
-func (i *InmemTransport) DisconnectAll() { - i.Lock() - defer i.Unlock() - i.peers = make(map[string]*InmemTransport) - - // Handle pipelines - for _, pipeline := range i.pipelines { - pipeline.Close() - } - i.pipelines = nil -} - -func newInmemPipeline(trans *InmemTransport, peer *InmemTransport, addr string) *inmemPipeline { - i := &inmemPipeline{ - trans: trans, - peer: peer, - peerAddr: addr, - doneCh: make(chan AppendFuture, 16), - inprogressCh: make(chan *inmemPipelineInflight, 16), - shutdownCh: make(chan struct{}), - } - go i.decodeResponses() - return i -} - -func (i *inmemPipeline) decodeResponses() { - timeout := i.trans.timeout - for { - select { - case inp := <-i.inprogressCh: - var timeoutCh <-chan time.Time - if timeout > 0 { - timeoutCh = time.After(timeout) - } - - select { - case rpcResp := <-inp.respCh: - // Copy the result back - *inp.future.resp = *rpcResp.Response.(*AppendEntriesResponse) - inp.future.respond(rpcResp.Error) - - select { - case i.doneCh <- inp.future: - case <-i.shutdownCh: - return - } - - case <-timeoutCh: - inp.future.respond(fmt.Errorf("command timed out")) - select { - case i.doneCh <- inp.future: - case <-i.shutdownCh: - return - } - - case <-i.shutdownCh: - return - } - case <-i.shutdownCh: - return - } - } -} - -func (i *inmemPipeline) AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) { - // Create a new future - future := &appendFuture{ - start: time.Now(), - args: args, - resp: resp, - } - future.init() - - // Handle a timeout - var timeout <-chan time.Time - if i.trans.timeout > 0 { - timeout = time.After(i.trans.timeout) - } - - // Send the RPC over - respCh := make(chan RPCResponse, 1) - rpc := RPC{ - Command: args, - RespChan: respCh, - } - select { - case i.peer.consumerCh <- rpc: - case <-timeout: - return nil, fmt.Errorf("command enqueue timeout") - case <-i.shutdownCh: - return nil, ErrPipelineShutdown - } - - // Send to be decoded - select { - case i.inprogressCh <- &inmemPipelineInflight{future, respCh}: - return future, nil - case <-i.shutdownCh: - return nil, ErrPipelineShutdown - } -} - -func (i *inmemPipeline) Consumer() <-chan AppendFuture { - return i.doneCh -} - -func (i *inmemPipeline) Close() error { - i.shutdownLock.Lock() - defer i.shutdownLock.Unlock() - if i.shutdown { - return nil - } - - i.shutdown = true - close(i.shutdownCh) - return nil -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_transport_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_transport_test.go deleted file mode 100644 index 2086a2389..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/inmem_transport_test.go +++ /dev/null @@ -1,12 +0,0 @@ -package raft - -import ( - "testing" -) - -func TestInmemTransportImpl(t *testing.T) { - var inm interface{} = &InmemTransport{} - if _, ok := inm.(Transport); !ok { - t.Fatalf("InmemTransport is not a Transport") - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/integ_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft/integ_test.go deleted file mode 100644 index 1d071e139..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/integ_test.go +++ /dev/null @@ -1,266 +0,0 @@ -package raft - -import ( - "bytes" - "fmt" - "io/ioutil" - "log" - "os" - "testing" - "time" -) - -// CheckInteg will skip a test if integration testing is not enabled. -func CheckInteg(t *testing.T) { - if !IsInteg() { - t.SkipNow() - } -} - -// IsInteg returns a boolean telling you if we're in integ testing mode. 
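IsInteg and the rest of the deleted harness follow. The gating pattern deserves a note: the expensive cluster tests run only when an environment variable opts in, so a plain go test stays fast and CI can flip on the full suite. A minimal sketch of the same gate (illustrative, not from the diff):

package demo

import (
	"os"
	"testing"
)

// checkInteg skips the calling test unless INTEG_TESTS is set,
// mirroring the CheckInteg/IsInteg pair above.
func checkInteg(t *testing.T) {
	if os.Getenv("INTEG_TESTS") == "" {
		t.SkipNow()
	}
}

func TestExpensiveCluster(t *testing.T) {
	checkInteg(t) // skipped unless INTEG_TESTS is non-empty
	// ... spin up nodes, apply load, assert convergence ...
}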
-func IsInteg() bool { - return os.Getenv("INTEG_TESTS") != "" -} - -type RaftEnv struct { - dir string - conf *Config - fsm *MockFSM - store *InmemStore - snapshot *FileSnapshotStore - peers *JSONPeers - trans *NetworkTransport - raft *Raft -} - -func (r *RaftEnv) Release() { - log.Printf("[WARN] Release node at %v", r.raft.localAddr) - f := r.raft.Shutdown() - if err := f.Error(); err != nil { - panic(err) - } - r.trans.Close() - os.RemoveAll(r.dir) -} - -func MakeRaft(t *testing.T, conf *Config) *RaftEnv { - env := &RaftEnv{} - - // Set the config - if conf == nil { - conf = inmemConfig() - } - env.conf = conf - - dir, err := ioutil.TempDir("", "raft") - if err != nil { - t.Fatalf("err: %v ", err) - } - env.dir = dir - - stable := NewInmemStore() - env.store = stable - - snap, err := NewFileSnapshotStore(dir, 3, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - env.snapshot = snap - - env.fsm = &MockFSM{} - - trans, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - env.trans = trans - - env.peers = NewJSONPeers(dir, trans) - - log.Printf("[INFO] Starting node at %v", trans.LocalAddr()) - raft, err := NewRaft(conf, env.fsm, stable, stable, snap, env.peers, trans) - if err != nil { - t.Fatalf("err: %v", err) - } - env.raft = raft - return env -} - -func WaitFor(env *RaftEnv, state RaftState) error { - limit := time.Now().Add(200 * time.Millisecond) - for env.raft.State() != state { - if time.Now().Before(limit) { - time.Sleep(10 * time.Millisecond) - } else { - return fmt.Errorf("failed to transition to state %v", state) - } - } - return nil -} - -func WaitForAny(state RaftState, envs []*RaftEnv) (*RaftEnv, error) { - limit := time.Now().Add(200 * time.Millisecond) -CHECK: - for _, env := range envs { - if env.raft.State() == state { - return env, nil - } - } - if time.Now().Before(limit) { - goto WAIT - } - return nil, fmt.Errorf("failed to find node in %v state", state) -WAIT: - time.Sleep(10 * time.Millisecond) - goto CHECK -} - -func WaitFuture(f Future, t *testing.T) error { - timer := time.AfterFunc(200*time.Millisecond, func() { - panic(fmt.Errorf("timeout waiting for future %v", f)) - }) - defer timer.Stop() - return f.Error() -} - -func NoErr(err error, t *testing.T) { - if err != nil { - t.Fatalf("err: %v", err) - } -} - -func CheckConsistent(envs []*RaftEnv, t *testing.T) { - limit := time.Now().Add(400 * time.Millisecond) - first := envs[0] - var err error -CHECK: - l1 := len(first.fsm.logs) - for i := 1; i < len(envs); i++ { - env := envs[i] - l2 := len(env.fsm.logs) - if l1 != l2 { - err = fmt.Errorf("log length mismatch %d %d", l1, l2) - goto ERR - } - for idx, log := range first.fsm.logs { - other := env.fsm.logs[idx] - if bytes.Compare(log, other) != 0 { - err = fmt.Errorf("log %d mismatch %v %v", idx, log, other) - goto ERR - } - } - } - return -ERR: - if time.Now().After(limit) { - t.Fatalf("%v", err) - } - time.Sleep(20 * time.Millisecond) - goto CHECK -} - -// Tests Raft by creating a cluster, growing it to 5 nodes while -// causing various stressful conditions -func TestRaft_Integ(t *testing.T) { - CheckInteg(t) - conf := DefaultConfig() - conf.HeartbeatTimeout = 50 * time.Millisecond - conf.ElectionTimeout = 50 * time.Millisecond - conf.LeaderLeaseTimeout = 50 * time.Millisecond - conf.CommitTimeout = 5 * time.Millisecond - conf.SnapshotThreshold = 100 - conf.TrailingLogs = 10 - conf.EnableSingleNode = true - - // Create a single node - env1 := MakeRaft(t, conf) - NoErr(WaitFor(env1, Leader), t) 
- - // Do some commits - var futures []Future - for i := 0; i < 100; i++ { - futures = append(futures, env1.raft.Apply([]byte(fmt.Sprintf("test%d", i)), 0)) - } - for _, f := range futures { - NoErr(WaitFuture(f, t), t) - log.Printf("[DEBUG] Applied %v", f) - } - - // Do a snapshot - NoErr(WaitFuture(env1.raft.Snapshot(), t), t) - - // Join a few nodes! - var envs []*RaftEnv - for i := 0; i < 4; i++ { - env := MakeRaft(t, conf) - addr := env.trans.LocalAddr() - NoErr(WaitFuture(env1.raft.AddPeer(addr), t), t) - envs = append(envs, env) - } - - // Wait for a leader - leader, err := WaitForAny(Leader, append([]*RaftEnv{env1}, envs...)) - NoErr(err, t) - - // Do some more commits - futures = nil - for i := 0; i < 100; i++ { - futures = append(futures, leader.raft.Apply([]byte(fmt.Sprintf("test%d", i)), 0)) - } - for _, f := range futures { - NoErr(WaitFuture(f, t), t) - log.Printf("[DEBUG] Applied %v", f) - } - - // Shoot two nodes in the head! - rm1, rm2 := envs[0], envs[1] - rm1.Release() - rm2.Release() - envs = envs[2:] - time.Sleep(10 * time.Millisecond) - - // Wait for a leader - leader, err = WaitForAny(Leader, append([]*RaftEnv{env1}, envs...)) - NoErr(err, t) - - // Do some more commits - futures = nil - for i := 0; i < 100; i++ { - futures = append(futures, leader.raft.Apply([]byte(fmt.Sprintf("test%d", i)), 0)) - } - for _, f := range futures { - NoErr(WaitFuture(f, t), t) - log.Printf("[DEBUG] Applied %v", f) - } - - // Join a few new nodes! - for i := 0; i < 2; i++ { - env := MakeRaft(t, conf) - addr := env.trans.LocalAddr() - NoErr(WaitFuture(leader.raft.AddPeer(addr), t), t) - envs = append(envs, env) - } - - // Remove the old nodes - NoErr(WaitFuture(leader.raft.RemovePeer(rm1.raft.localAddr), t), t) - NoErr(WaitFuture(leader.raft.RemovePeer(rm2.raft.localAddr), t), t) - - // Shoot the leader - env1.Release() - time.Sleep(3 * conf.HeartbeatTimeout) - - // Wait for a leader - leader, err = WaitForAny(Leader, envs) - NoErr(err, t) - - allEnvs := append([]*RaftEnv{env1}, envs...) - CheckConsistent(allEnvs, t) - - if len(env1.fsm.logs) != 300 { - t.Fatalf("should apply 300 logs! %d", len(env1.fsm.logs)) - } - - for _, e := range envs { - e.Release() - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/log.go b/Godeps/_workspace/src/github.com/hashicorp/raft/log.go deleted file mode 100644 index a8c5a40ea..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/log.go +++ /dev/null @@ -1,60 +0,0 @@ -package raft - -// LogType describes various types of log entries. -type LogType uint8 - -const ( - // LogCommand is applied to a user FSM. - LogCommand LogType = iota - - // LogNoop is used to assert leadership. - LogNoop - - // LogAddPeer is used to add a new peer. - LogAddPeer - - // LogRemovePeer is used to remove an existing peer. - LogRemovePeer - - // LogBarrier is used to ensure all preceding operations have been - // applied to the FSM. It is similar to LogNoop, but instead of returning - // once committed, it only returns once the FSM manager acks it. Otherwise - // it is possible there are operations committed but not yet applied to - // the FSM. - LogBarrier -) - -// Log entries are replicated to all members of the Raft cluster -// and form the heart of the replicated state machine. -type Log struct { - Index uint64 - Term uint64 - Type LogType - Data []byte - - // peer is not exported since it is not transmitted, only used - // internally to construct the Data field. 
- peer string -} - -// LogStore is used to provide an interface for storing -// and retrieving logs in a durable fashion. -type LogStore interface { - // Returns the first index written. 0 for no entries. - FirstIndex() (uint64, error) - - // Returns the last index written. 0 for no entries. - LastIndex() (uint64, error) - - // Gets a log entry at a given index. - GetLog(index uint64, log *Log) error - - // Stores a log entry. - StoreLog(log *Log) error - - // Stores multiple log entries. - StoreLogs(logs []*Log) error - - // Deletes a range of log entries. The range is inclusive. - DeleteRange(min, max uint64) error -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/log_cache.go b/Godeps/_workspace/src/github.com/hashicorp/raft/log_cache.go deleted file mode 100644 index 952e98c22..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/log_cache.go +++ /dev/null @@ -1,79 +0,0 @@ -package raft - -import ( - "fmt" - "sync" -) - -// LogCache wraps any LogStore implementation to provide an -// in-memory ring buffer. This is used to cache access to -// the recently written entries. For implementations that do not -// cache themselves, this can provide a substantial boost by -// avoiding disk I/O on recent entries. -type LogCache struct { - store LogStore - - cache []*Log - l sync.RWMutex -} - -// NewLogCache is used to create a new LogCache with the -// given capacity and backend store. -func NewLogCache(capacity int, store LogStore) (*LogCache, error) { - if capacity <= 0 { - return nil, fmt.Errorf("capacity must be positive") - } - c := &LogCache{ - store: store, - cache: make([]*Log, capacity), - } - return c, nil -} - -func (c *LogCache) GetLog(idx uint64, log *Log) error { - // Check the buffer for an entry - c.l.RLock() - cached := c.cache[idx%uint64(len(c.cache))] - c.l.RUnlock() - - // Check if entry is valid - if cached != nil && cached.Index == idx { - *log = *cached - return nil - } - - // Forward request on cache miss - return c.store.GetLog(idx, log) -} - -func (c *LogCache) StoreLog(log *Log) error { - return c.StoreLogs([]*Log{log}) -} - -func (c *LogCache) StoreLogs(logs []*Log) error { - // Insert the logs into the ring buffer - c.l.Lock() - for _, l := range logs { - c.cache[l.Index%uint64(len(c.cache))] = l - } - c.l.Unlock() - - return c.store.StoreLogs(logs) -} - -func (c *LogCache) FirstIndex() (uint64, error) { - return c.store.FirstIndex() -} - -func (c *LogCache) LastIndex() (uint64, error) { - return c.store.LastIndex() -} - -func (c *LogCache) DeleteRange(min, max uint64) error { - // Invalidate the cache on deletes - c.l.Lock() - c.cache = make([]*Log, len(c.cache)) - c.l.Unlock() - - return c.store.DeleteRange(min, max) -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/log_cache_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft/log_cache_test.go deleted file mode 100644 index 7569e78ee..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/log_cache_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package raft - -import ( - "testing" -) - -func TestLogCache(t *testing.T) { - store := NewInmemStore() - c, _ := NewLogCache(16, store) - - // Insert into the in-mem store - for i := 0; i < 32; i++ { - log := &Log{Index: uint64(i) + 1} - store.StoreLog(log) - } - - // Check the indexes - if idx, _ := c.FirstIndex(); idx != 1 { - t.Fatalf("bad: %d", idx) - } - if idx, _ := c.LastIndex(); idx != 32 { - t.Fatalf("bad: %d", idx) - } - - // Try get log with a miss - var out Log - err := c.GetLog(1, &out) - if err != nil { - 
t.Fatalf("err: %v", err) - } - if out.Index != 1 { - t.Fatalf("bad: %#v", out) - } - - // Store logs - l1 := &Log{Index: 33} - l2 := &Log{Index: 34} - err = c.StoreLogs([]*Log{l1, l2}) - if err != nil { - t.Fatalf("err: %v", err) - } - - if idx, _ := c.LastIndex(); idx != 34 { - t.Fatalf("bad: %d", idx) - } - - // Check that it wrote-through - err = store.GetLog(33, &out) - if err != nil { - t.Fatalf("err: %v", err) - } - err = store.GetLog(34, &out) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Delete in the backend - err = store.DeleteRange(33, 34) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should be in the ring buffer - err = c.GetLog(33, &out) - if err != nil { - t.Fatalf("err: %v", err) - } - err = c.GetLog(34, &out) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Purge the ring buffer - err = c.DeleteRange(33, 34) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Should not be in the ring buffer - err = c.GetLog(33, &out) - if err != ErrLogNotFound { - t.Fatalf("err: %v", err) - } - err = c.GetLog(34, &out) - if err != ErrLogNotFound { - t.Fatalf("err: %v", err) - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/net_transport.go b/Godeps/_workspace/src/github.com/hashicorp/raft/net_transport.go deleted file mode 100644 index 3f3ed31fd..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/net_transport.go +++ /dev/null @@ -1,606 +0,0 @@ -package raft - -import ( - "bufio" - "errors" - "fmt" - "io" - "log" - "net" - "os" - "sync" - "time" - - "github.com/hashicorp/go-msgpack/codec" -) - -const ( - rpcAppendEntries uint8 = iota - rpcRequestVote - rpcInstallSnapshot - - // DefaultTimeoutScale is the default TimeoutScale in a NetworkTransport. - DefaultTimeoutScale = 256 * 1024 // 256KB - - // rpcMaxPipeline controls the maximum number of outstanding - // AppendEntries RPC calls. - rpcMaxPipeline = 128 -) - -var ( - // ErrTransportShutdown is returned when operations on a transport are - // invoked after it's been terminated. - ErrTransportShutdown = errors.New("transport shutdown") - - // ErrPipelineShutdown is returned when the pipeline is closed. - ErrPipelineShutdown = errors.New("append pipeline closed") -) - -/* - -NetworkTransport provides a network based transport that can be -used to communicate with Raft on remote machines. It requires -an underlying stream layer to provide a stream abstraction, which can -be simple TCP, TLS, etc. - -This transport is very simple and lightweight. Each RPC request is -framed by sending a byte that indicates the message type, followed -by the MsgPack encoded request. - -The response is an error string followed by the response object, -both are encoded using MsgPack. - -InstallSnapshot is special, in that after the RPC request we stream -the entire state. That socket is not re-used as the connection state -is not known if there is an error. - -*/ -type NetworkTransport struct { - connPool map[string][]*netConn - connPoolLock sync.Mutex - - consumeCh chan RPC - - heartbeatFn func(RPC) - heartbeatFnLock sync.Mutex - - logger *log.Logger - - maxPool int - - shutdown bool - shutdownCh chan struct{} - shutdownLock sync.Mutex - - stream StreamLayer - - timeout time.Duration - TimeoutScale int -} - -// StreamLayer is used with the NetworkTransport to provide -// the low level stream abstraction. 
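StreamLayer and the rest of the deleted implementation follow. The framing described in the doc block above is small enough to show whole: one message-type byte, then the MsgPack-encoded request, using the same go-msgpack codec package this file imports. A sketch (error handling elided; the map stands in for a real request struct; rpcRequestVote = 1 matches the iota constants above):

package main

import (
	"bufio"
	"bytes"
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

func main() {
	var buf bytes.Buffer
	w := bufio.NewWriter(&buf)
	enc := codec.NewEncoder(w, &codec.MsgpackHandle{})

	const rpcRequestVote byte = 1 // second value of the rpc* iota block
	w.WriteByte(rpcRequestVote)               // 1. frame type byte
	enc.Encode(map[string]uint64{"Term": 20}) // 2. MsgPack-encoded body
	w.Flush()

	fmt.Printf("framed request: % x\n", buf.Bytes())
}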
-type StreamLayer interface { - net.Listener - - // Dial is used to create a new outgoing connection - Dial(address string, timeout time.Duration) (net.Conn, error) -} - -type netConn struct { - target string - conn net.Conn - r *bufio.Reader - w *bufio.Writer - dec *codec.Decoder - enc *codec.Encoder -} - -func (n *netConn) Release() error { - return n.conn.Close() -} - -type netPipeline struct { - conn *netConn - trans *NetworkTransport - - doneCh chan AppendFuture - inprogressCh chan *appendFuture - - shutdown bool - shutdownCh chan struct{} - shutdownLock sync.Mutex -} - -// NewNetworkTransport creates a new network transport with the given stream -// layer. The maxPool controls how many connections we will pool. The -// timeout is used to apply I/O deadlines. For InstallSnapshot, we multiply -// the timeout by (SnapshotSize / TimeoutScale). -func NewNetworkTransport( - stream StreamLayer, - maxPool int, - timeout time.Duration, - logOutput io.Writer, -) *NetworkTransport { - if logOutput == nil { - logOutput = os.Stderr - } - trans := &NetworkTransport{ - connPool: make(map[string][]*netConn), - consumeCh: make(chan RPC), - logger: log.New(logOutput, "", log.LstdFlags), - maxPool: maxPool, - shutdownCh: make(chan struct{}), - stream: stream, - timeout: timeout, - TimeoutScale: DefaultTimeoutScale, - } - go trans.listen() - return trans -} - -// SetHeartbeatHandler is used to set up a heartbeat handler -// as a fast-path. This is to avoid head-of-line blocking from -// disk IO. -func (n *NetworkTransport) SetHeartbeatHandler(cb func(rpc RPC)) { - n.heartbeatFnLock.Lock() - defer n.heartbeatFnLock.Unlock() - n.heartbeatFn = cb -} - -// Close is used to stop the network transport. -func (n *NetworkTransport) Close() error { - n.shutdownLock.Lock() - defer n.shutdownLock.Unlock() - - if !n.shutdown { - close(n.shutdownCh) - n.stream.Close() - n.shutdown = true - } - return nil -} - -// Consumer implements the Transport interface. -func (n *NetworkTransport) Consumer() <-chan RPC { - return n.consumeCh -} - -// LocalAddr implements the Transport interface. -func (n *NetworkTransport) LocalAddr() string { - return n.stream.Addr().String() -} - -// IsShutdown is used to check if the transport is shutdown. -func (n *NetworkTransport) IsShutdown() bool { - select { - case <-n.shutdownCh: - return true - default: - return false - } -} - -// getPooledConn is used to grab a pooled connection. -func (n *NetworkTransport) getPooledConn(target string) *netConn { - n.connPoolLock.Lock() - defer n.connPoolLock.Unlock() - - conns, ok := n.connPool[target] - if !ok || len(conns) == 0 { - return nil - } - - var conn *netConn - num := len(conns) - conn, conns[num-1] = conns[num-1], nil - n.connPool[target] = conns[:num-1] - return conn -} - -// getConn is used to get a connection from the pool. -func (n *NetworkTransport) getConn(target string) (*netConn, error) { - // Check for a pooled conn - if conn := n.getPooledConn(target); conn != nil { - return conn, nil - } - - // Dial a new connection - conn, err := n.stream.Dial(target, n.timeout) - if err != nil { - return nil, err - } - - // Wrap the conn - netConn := &netConn{ - target: target, - conn: conn, - r: bufio.NewReader(conn), - w: bufio.NewWriter(conn), - } - - // Setup encoder/decoders - netConn.dec = codec.NewDecoder(netConn.r, &codec.MsgpackHandle{}) - netConn.enc = codec.NewEncoder(netConn.w, &codec.MsgpackHandle{}) - - // Done - return netConn, nil -} - -// returnConn returns a connection back to the pool.
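returnConn, the other half of the pool, follows. Stripped of locking and shutdown checks, getPooledConn/returnConn form a per-target free list: pop from the slice tail on checkout, append on return until maxPool is hit, otherwise close. A sketch with strings standing in for connections:

package main

import "fmt"

type pool struct {
	conns   map[string][]string
	maxPool int
}

// get pops the most recently returned connection, if any.
func (p *pool) get(target string) (string, bool) {
	conns := p.conns[target]
	if len(conns) == 0 {
		return "", false // caller dials a fresh connection instead
	}
	c := conns[len(conns)-1]
	p.conns[target] = conns[:len(conns)-1]
	return c, true
}

// put returns a connection, dropping it once the pool is full.
func (p *pool) put(target, c string) {
	if len(p.conns[target]) < p.maxPool {
		p.conns[target] = append(p.conns[target], c)
		return
	}
	// over capacity: the real code releases (closes) the connection here
}

func main() {
	p := &pool{conns: map[string][]string{"10.0.0.1:7000": {"c1", "c2"}}, maxPool: 2}
	c, ok := p.get("10.0.0.1:7000")
	fmt.Println(c, ok) // c2 true
	p.put("10.0.0.1:7000", c)
}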
-func (n *NetworkTransport) returnConn(conn *netConn) { - n.connPoolLock.Lock() - defer n.connPoolLock.Unlock() - - key := conn.target - conns := n.connPool[key] - - if !n.IsShutdown() && len(conns) < n.maxPool { - n.connPool[key] = append(conns, conn) - } else { - conn.Release() - } -} - -// AppendEntriesPipeline returns an interface that can be used to pipeline -// AppendEntries requests. -func (n *NetworkTransport) AppendEntriesPipeline(target string) (AppendPipeline, error) { - // Get a connection - conn, err := n.getConn(target) - if err != nil { - return nil, err - } - - // Create the pipeline - return newNetPipeline(n, conn), nil -} - -// AppendEntries implements the Transport interface. -func (n *NetworkTransport) AppendEntries(target string, args *AppendEntriesRequest, resp *AppendEntriesResponse) error { - return n.genericRPC(target, rpcAppendEntries, args, resp) -} - -// RequestVote implements the Transport interface. -func (n *NetworkTransport) RequestVote(target string, args *RequestVoteRequest, resp *RequestVoteResponse) error { - return n.genericRPC(target, rpcRequestVote, args, resp) -} - -// genericRPC handles a simple request/response RPC. -func (n *NetworkTransport) genericRPC(target string, rpcType uint8, args interface{}, resp interface{}) error { - // Get a conn - conn, err := n.getConn(target) - if err != nil { - return err - } - - // Set a deadline - if n.timeout > 0 { - conn.conn.SetDeadline(time.Now().Add(n.timeout)) - } - - // Send the RPC - if err := sendRPC(conn, rpcType, args); err != nil { - return err - } - - // Decode the response - canReturn, err := decodeResponse(conn, resp) - if canReturn { - n.returnConn(conn) - } - return err -} - -// InstallSnapshot implements the Transport interface. -func (n *NetworkTransport) InstallSnapshot(target string, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error { - // Get a conn, always close for InstallSnapshot - conn, err := n.getConn(target) - if err != nil { - return err - } - defer conn.Release() - - // Set a deadline, scaled by request size - if n.timeout > 0 { - timeout := n.timeout * time.Duration(args.Size/int64(n.TimeoutScale)) - if timeout < n.timeout { - timeout = n.timeout - } - conn.conn.SetDeadline(time.Now().Add(timeout)) - } - - // Send the RPC - if err := sendRPC(conn, rpcInstallSnapshot, args); err != nil { - return err - } - - // Stream the state - if _, err := io.Copy(conn.w, data); err != nil { - return err - } - - // Flush - if err := conn.w.Flush(); err != nil { - return err - } - - // Decode the response, do not return conn - _, err = decodeResponse(conn, resp) - return err -} - -// EncodePeer implements the Transport interface. -func (n *NetworkTransport) EncodePeer(p string) []byte { - return []byte(p) -} - -// DecodePeer implements the Transport interface. -func (n *NetworkTransport) DecodePeer(buf []byte) string { - return string(buf) -} - -// listen is used to handle incoming connections. -func (n *NetworkTransport) listen() { - for { - // Accept incoming connections - conn, err := n.stream.Accept() - if err != nil { - if n.IsShutdown() { - return - } - n.logger.Printf("[ERR] raft-net: Failed to accept connection: %v", err) - continue - } - n.logger.Printf("[DEBUG] raft-net: %v accepted connection from: %v", n.LocalAddr(), conn.RemoteAddr()) - - // Handle the connection in a dedicated routine - go n.handleConn(conn) - } -} - -// handleConn is used to handle an inbound connection for its lifespan.
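handleConn follows. One detail from InstallSnapshot above deserves a worked example: the I/O deadline scales with the snapshot payload, base timeout times (Size / TimeoutScale), clamped from below by the base. With the file's DefaultTimeoutScale of 256 KB (standalone sketch):

package main

import (
	"fmt"
	"time"
)

// snapshotTimeout reproduces the deadline arithmetic from InstallSnapshot.
func snapshotTimeout(base time.Duration, size, scale int64) time.Duration {
	t := base * time.Duration(size/scale)
	if t < base {
		t = base // never give a snapshot less than the base timeout
	}
	return t
}

func main() {
	base := time.Second
	scale := int64(256 * 1024) // DefaultTimeoutScale
	fmt.Println(snapshotTimeout(base, 10*1024*1024, scale)) // 40s for a 10 MB snapshot
	fmt.Println(snapshotTimeout(base, 1024, scale))         // clamped to 1s
}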
-func (n *NetworkTransport) handleConn(conn net.Conn) { - defer conn.Close() - r := bufio.NewReader(conn) - w := bufio.NewWriter(conn) - dec := codec.NewDecoder(r, &codec.MsgpackHandle{}) - enc := codec.NewEncoder(w, &codec.MsgpackHandle{}) - - for { - if err := n.handleCommand(r, dec, enc); err != nil { - if err != io.EOF { - n.logger.Printf("[ERR] raft-net: Failed to decode incoming command: %v", err) - } - return - } - if err := w.Flush(); err != nil { - n.logger.Printf("[ERR] raft-net: Failed to flush response: %v", err) - return - } - } -} - -// handleCommand is used to decode and dispatch a single command. -func (n *NetworkTransport) handleCommand(r *bufio.Reader, dec *codec.Decoder, enc *codec.Encoder) error { - // Get the rpc type - rpcType, err := r.ReadByte() - if err != nil { - return err - } - - // Create the RPC object - respCh := make(chan RPCResponse, 1) - rpc := RPC{ - RespChan: respCh, - } - - // Decode the command - isHeartbeat := false - switch rpcType { - case rpcAppendEntries: - var req AppendEntriesRequest - if err := dec.Decode(&req); err != nil { - return err - } - rpc.Command = &req - - // Check if this is a heartbeat - if req.Term != 0 && req.Leader != nil && - req.PrevLogEntry == 0 && req.PrevLogTerm == 0 && - len(req.Entries) == 0 && req.LeaderCommitIndex == 0 { - isHeartbeat = true - } - - case rpcRequestVote: - var req RequestVoteRequest - if err := dec.Decode(&req); err != nil { - return err - } - rpc.Command = &req - - case rpcInstallSnapshot: - var req InstallSnapshotRequest - if err := dec.Decode(&req); err != nil { - return err - } - rpc.Command = &req - rpc.Reader = io.LimitReader(r, req.Size) - - default: - return fmt.Errorf("unknown rpc type %d", rpcType) - } - - // Check for heartbeat fast-path - if isHeartbeat { - n.heartbeatFnLock.Lock() - fn := n.heartbeatFn - n.heartbeatFnLock.Unlock() - if fn != nil { - fn(rpc) - goto RESP - } - } - - // Dispatch the RPC - select { - case n.consumeCh <- rpc: - case <-n.shutdownCh: - return ErrTransportShutdown - } - - // Wait for response -RESP: - select { - case resp := <-respCh: - // Send the error first - respErr := "" - if resp.Error != nil { - respErr = resp.Error.Error() - } - if err := enc.Encode(respErr); err != nil { - return err - } - - // Send the response - if err := enc.Encode(resp.Response); err != nil { - return err - } - case <-n.shutdownCh: - return ErrTransportShutdown - } - return nil -} - -// decodeResponse is used to decode an RPC response and reports whether -// the connection can be reused. -func decodeResponse(conn *netConn, resp interface{}) (bool, error) { - // Decode the error if any - var rpcError string - if err := conn.dec.Decode(&rpcError); err != nil { - conn.Release() - return false, err - } - - // Decode the response - if err := conn.dec.Decode(resp); err != nil { - conn.Release() - return false, err - } - - // Format an error if any - if rpcError != "" { - return true, fmt.Errorf(rpcError) - } - return true, nil -} - -// sendRPC is used to encode and send the RPC. -func sendRPC(conn *netConn, rpcType uint8, args interface{}) error { - // Write the request type - if err := conn.w.WriteByte(rpcType); err != nil { - conn.Release() - return err - } - - // Send the request - if err := conn.enc.Encode(args); err != nil { - conn.Release() - return err - } - - // Flush - if err := conn.w.Flush(); err != nil { - conn.Release() - return err - } - return nil -} - -// newNetPipeline is used to construct a netPipeline from a given -// transport and connection. 
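newNetPipeline follows. The heartbeat fast-path test in handleCommand above reduces to one predicate: an AppendEntries that names a term and leader but carries no entries and zeroed log/commit fields is a pure heartbeat, and it bypasses the consume channel so disk-bound work cannot head-of-line block it. Condensed (illustrative local types, not the real request struct):

package main

import "fmt"

type appendEntries struct {
	Term              uint64
	Leader            []byte
	PrevLogEntry      uint64
	PrevLogTerm       uint64
	Entries           []string
	LeaderCommitIndex uint64
}

// isHeartbeat mirrors the condition checked in handleCommand.
func isHeartbeat(req appendEntries) bool {
	return req.Term != 0 && req.Leader != nil &&
		req.PrevLogEntry == 0 && req.PrevLogTerm == 0 &&
		len(req.Entries) == 0 && req.LeaderCommitIndex == 0
}

func main() {
	fmt.Println(isHeartbeat(appendEntries{Term: 10, Leader: []byte("n1")}))                   // true
	fmt.Println(isHeartbeat(appendEntries{Term: 10, Leader: []byte("n1"), PrevLogEntry: 5})) // false
}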
-func newNetPipeline(trans *NetworkTransport, conn *netConn) *netPipeline { - n := &netPipeline{ - conn: conn, - trans: trans, - doneCh: make(chan AppendFuture, rpcMaxPipeline), - inprogressCh: make(chan *appendFuture, rpcMaxPipeline), - shutdownCh: make(chan struct{}), - } - go n.decodeResponses() - return n -} - -// decodeResponses is a long-running routine that decodes the responses -// sent on the connection. -func (n *netPipeline) decodeResponses() { - timeout := n.trans.timeout - for { - select { - case future := <-n.inprogressCh: - if timeout > 0 { - n.conn.conn.SetReadDeadline(time.Now().Add(timeout)) - } - - _, err := decodeResponse(n.conn, future.resp) - future.respond(err) - select { - case n.doneCh <- future: - case <-n.shutdownCh: - return - } - case <-n.shutdownCh: - return - } - } -} - -// AppendEntries is used to pipeline a new append entries request. -func (n *netPipeline) AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) { - // Create a new future - future := &appendFuture{ - start: time.Now(), - args: args, - resp: resp, - } - future.init() - - // Add a send timeout - if timeout := n.trans.timeout; timeout > 0 { - n.conn.conn.SetWriteDeadline(time.Now().Add(timeout)) - } - - // Send the RPC - if err := sendRPC(n.conn, rpcAppendEntries, future.args); err != nil { - return nil, err - } - - // Hand off for decoding; this can also cause back-pressure - // to prevent too many inflight requests - select { - case n.inprogressCh <- future: - return future, nil - case <-n.shutdownCh: - return nil, ErrPipelineShutdown - } -} - -// Consumer returns a channel that can be used to consume complete futures. -func (n *netPipeline) Consumer() <-chan AppendFuture { - return n.doneCh -} - -// Close is used to shut down the pipeline connection.
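The deleted Close follows. Note how the pipeline gets bounded pipelining for free: inprogressCh is buffered to rpcMaxPipeline (128), so the buffer itself is the semaphore, and once that many AppendEntries RPCs are awaiting responses the next send blocks. The mechanism in isolation (sketch):

package main

import "fmt"

func main() {
	const maxInflight = 3 // stands in for rpcMaxPipeline
	inprogress := make(chan int, maxInflight)

	accepted := 0
	for i := 0; i < 5; i++ {
		select {
		case inprogress <- i: // slot available: RPC may proceed
			accepted++
		default:
			// buffer full: this is where back-pressure would block the sender
		}
	}
	fmt.Println("accepted without blocking:", accepted) // 3
}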
-func (n *netPipeline) Close() error { - n.shutdownLock.Lock() - defer n.shutdownLock.Unlock() - if n.shutdown { - return nil - } - - // Release the connection - n.conn.Release() - - n.shutdown = true - close(n.shutdownCh) - return nil -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/net_transport_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft/net_transport_test.go deleted file mode 100644 index 0127ac55e..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/net_transport_test.go +++ /dev/null @@ -1,449 +0,0 @@ -package raft - -import ( - "bytes" - "reflect" - "sync" - "testing" - "time" -) - -func TestNetworkTransport_StartStop(t *testing.T) { - trans, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - trans.Close() -} - -func TestNetworkTransport_Heartbeat_FastPath(t *testing.T) { - // Transport 1 is consumer - trans1, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer trans1.Close() - - // Make the RPC request - args := AppendEntriesRequest{ - Term: 10, - Leader: []byte("cartman"), - } - resp := AppendEntriesResponse{ - Term: 4, - LastLog: 90, - Success: true, - } - - invoked := false - fastpath := func(rpc RPC) { - // Verify the command - req := rpc.Command.(*AppendEntriesRequest) - if !reflect.DeepEqual(req, &args) { - t.Fatalf("command mismatch: %#v %#v", *req, args) - } - - rpc.Respond(&resp, nil) - invoked = true - } - trans1.SetHeartbeatHandler(fastpath) - - // Transport 2 makes outbound request - trans2, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer trans2.Close() - - var out AppendEntriesResponse - if err := trans2.AppendEntries(trans1.LocalAddr(), &args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - // Verify the response - if !reflect.DeepEqual(resp, out) { - t.Fatalf("command mismatch: %#v %#v", resp, out) - } - - // Ensure fast-path is used - if !invoked { - t.Fatalf("fast-path not used") - } -} - -func TestNetworkTransport_AppendEntries(t *testing.T) { - // Transport 1 is consumer - trans1, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer trans1.Close() - rpcCh := trans1.Consumer() - - // Make the RPC request - args := AppendEntriesRequest{ - Term: 10, - Leader: []byte("cartman"), - PrevLogEntry: 100, - PrevLogTerm: 4, - Entries: []*Log{ - &Log{ - Index: 101, - Term: 4, - Type: LogNoop, - }, - }, - LeaderCommitIndex: 90, - } - resp := AppendEntriesResponse{ - Term: 4, - LastLog: 90, - Success: true, - } - - // Listen for a request - go func() { - select { - case rpc := <-rpcCh: - // Verify the command - req := rpc.Command.(*AppendEntriesRequest) - if !reflect.DeepEqual(req, &args) { - t.Fatalf("command mismatch: %#v %#v", *req, args) - } - - rpc.Respond(&resp, nil) - - case <-time.After(200 * time.Millisecond): - t.Fatalf("timeout") - } - }() - - // Transport 2 makes outbound request - trans2, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer trans2.Close() - - var out AppendEntriesResponse - if err := trans2.AppendEntries(trans1.LocalAddr(), &args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - // Verify the response - if !reflect.DeepEqual(resp, out) { - t.Fatalf("command mismatch: %#v %#v", resp, out) - } -} - -func TestNetworkTransport_AppendEntriesPipeline(t *testing.T) 
{ - // Transport 1 is consumer - trans1, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer trans1.Close() - rpcCh := trans1.Consumer() - - // Make the RPC request - args := AppendEntriesRequest{ - Term: 10, - Leader: []byte("cartman"), - PrevLogEntry: 100, - PrevLogTerm: 4, - Entries: []*Log{ - &Log{ - Index: 101, - Term: 4, - Type: LogNoop, - }, - }, - LeaderCommitIndex: 90, - } - resp := AppendEntriesResponse{ - Term: 4, - LastLog: 90, - Success: true, - } - - // Listen for a request - go func() { - for i := 0; i < 10; i++ { - select { - case rpc := <-rpcCh: - // Verify the command - req := rpc.Command.(*AppendEntriesRequest) - if !reflect.DeepEqual(req, &args) { - t.Fatalf("command mismatch: %#v %#v", *req, args) - } - rpc.Respond(&resp, nil) - - case <-time.After(200 * time.Millisecond): - t.Fatalf("timeout") - } - } - }() - - // Transport 2 makes outbound request - trans2, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer trans2.Close() - - pipeline, err := trans2.AppendEntriesPipeline(trans1.LocalAddr()) - if err != nil { - t.Fatalf("err: %v", err) - } - defer pipeline.Close() - for i := 0; i < 10; i++ { - out := new(AppendEntriesResponse) - if _, err := pipeline.AppendEntries(&args, out); err != nil { - t.Fatalf("err: %v", err) - } - } - - respCh := pipeline.Consumer() - for i := 0; i < 10; i++ { - select { - case ready := <-respCh: - // Verify the response - if !reflect.DeepEqual(&resp, ready.Response()) { - t.Fatalf("command mismatch: %#v %#v", &resp, ready.Response()) - } - case <-time.After(200 * time.Millisecond): - t.Fatalf("timeout") - } - } -} - -func TestNetworkTransport_RequestVote(t *testing.T) { - // Transport 1 is consumer - trans1, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer trans1.Close() - rpcCh := trans1.Consumer() - - // Make the RPC request - args := RequestVoteRequest{ - Term: 20, - Candidate: []byte("butters"), - LastLogIndex: 100, - LastLogTerm: 19, - } - resp := RequestVoteResponse{ - Term: 100, - Peers: []byte("blah"), - Granted: false, - } - - // Listen for a request - go func() { - select { - case rpc := <-rpcCh: - // Verify the command - req := rpc.Command.(*RequestVoteRequest) - if !reflect.DeepEqual(req, &args) { - t.Fatalf("command mismatch: %#v %#v", *req, args) - } - - rpc.Respond(&resp, nil) - - case <-time.After(200 * time.Millisecond): - t.Fatalf("timeout") - } - }() - - // Transport 2 makes outbound request - trans2, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer trans2.Close() - - var out RequestVoteResponse - if err := trans2.RequestVote(trans1.LocalAddr(), &args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - // Verify the response - if !reflect.DeepEqual(resp, out) { - t.Fatalf("command mismatch: %#v %#v", resp, out) - } -} - -func TestNetworkTransport_InstallSnapshot(t *testing.T) { - // Transport 1 is consumer - trans1, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer trans1.Close() - rpcCh := trans1.Consumer() - - // Make the RPC request - args := InstallSnapshotRequest{ - Term: 10, - Leader: []byte("kyle"), - LastLogIndex: 100, - LastLogTerm: 9, - Peers: []byte("blah blah"), - Size: 10, - } - resp := InstallSnapshotResponse{ - Term: 10, - Success: true, - } - - // Listen 
for a request - go func() { - select { - case rpc := <-rpcCh: - // Verify the command - req := rpc.Command.(*InstallSnapshotRequest) - if !reflect.DeepEqual(req, &args) { - t.Fatalf("command mismatch: %#v %#v", *req, args) - } - - // Try to read the bytes - buf := make([]byte, 10) - rpc.Reader.Read(buf) - - // Compare - if bytes.Compare(buf, []byte("0123456789")) != 0 { - t.Fatalf("bad buf %v", buf) - } - - rpc.Respond(&resp, nil) - - case <-time.After(200 * time.Millisecond): - t.Fatalf("timeout") - } - }() - - // Transport 2 makes outbound request - trans2, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer trans2.Close() - - // Create a buffer - buf := bytes.NewBuffer([]byte("0123456789")) - - var out InstallSnapshotResponse - if err := trans2.InstallSnapshot(trans1.LocalAddr(), &args, &out, buf); err != nil { - t.Fatalf("err: %v", err) - } - - // Verify the response - if !reflect.DeepEqual(resp, out) { - t.Fatalf("command mismatch: %#v %#v", resp, out) - } -} - -func TestNetworkTransport_EncodeDecode(t *testing.T) { - // Transport 1 is consumer - trans1, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer trans1.Close() - - local := trans1.LocalAddr() - enc := trans1.EncodePeer(local) - dec := trans1.DecodePeer(enc) - - if dec != local { - t.Fatalf("enc/dec fail: %v %v", dec, local) - } -} - -func TestNetworkTransport_PooledConn(t *testing.T) { - // Transport 1 is consumer - trans1, err := NewTCPTransport("127.0.0.1:0", nil, 2, time.Second, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer trans1.Close() - rpcCh := trans1.Consumer() - - // Make the RPC request - args := AppendEntriesRequest{ - Term: 10, - Leader: []byte("cartman"), - PrevLogEntry: 100, - PrevLogTerm: 4, - Entries: []*Log{ - &Log{ - Index: 101, - Term: 4, - Type: LogNoop, - }, - }, - LeaderCommitIndex: 90, - } - resp := AppendEntriesResponse{ - Term: 4, - LastLog: 90, - Success: true, - } - - // Listen for a request - go func() { - for { - select { - case rpc := <-rpcCh: - // Verify the command - req := rpc.Command.(*AppendEntriesRequest) - if !reflect.DeepEqual(req, &args) { - t.Fatalf("command mismatch: %#v %#v", *req, args) - } - rpc.Respond(&resp, nil) - - case <-time.After(200 * time.Millisecond): - return - } - } - }() - - // Transport 2 makes outbound request, 3 conn pool - trans2, err := NewTCPTransport("127.0.0.1:0", nil, 3, time.Second, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - defer trans2.Close() - - // Create wait group - wg := &sync.WaitGroup{} - wg.Add(5) - - appendFunc := func() { - defer wg.Done() - var out AppendEntriesResponse - if err := trans2.AppendEntries(trans1.LocalAddr(), &args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - // Verify the response - if !reflect.DeepEqual(resp, out) { - t.Fatalf("command mismatch: %#v %#v", resp, out) - } - } - - // Try to do parallel appends, should stress the conn pool - for i := 0; i < 5; i++ { - go appendFunc() - } - - // Wait for the routines to finish - wg.Wait() - - // Check the conn pool size - addr := trans1.LocalAddr() - if len(trans2.connPool[addr]) != 3 { - t.Fatalf("Expected 3 pooled conns!") - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/peer.go b/Godeps/_workspace/src/github.com/hashicorp/raft/peer.go deleted file mode 100644 index 6f3bcf856..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/peer.go +++ /dev/null @@ -1,122 +0,0 @@ -package 
raft - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "os" - "path/filepath" - "sync" -) - -const ( - jsonPeerPath = "peers.json" -) - -// PeerStore provides an interface for persistent storage and -// retrieval of peers. We use a separate interface from StableStore -// since the peers may need to be edited by a human operator. For example, -// in a two-node cluster, the failure of either node requires human intervention -// since consensus is impossible. -type PeerStore interface { - // Peers returns the list of known peers. - Peers() ([]string, error) - - // SetPeers sets the list of known peers. This is invoked when a peer is - // added or removed. - SetPeers([]string) error -} - -// StaticPeers is used to provide a static list of peers. -type StaticPeers struct { - StaticPeers []string - l sync.Mutex -} - -// Peers implements the PeerStore interface. -func (s *StaticPeers) Peers() ([]string, error) { - s.l.Lock() - peers := s.StaticPeers - s.l.Unlock() - return peers, nil -} - -// SetPeers implements the PeerStore interface. -func (s *StaticPeers) SetPeers(p []string) error { - s.l.Lock() - s.StaticPeers = p - s.l.Unlock() - return nil -} - -// JSONPeers is used to provide peer persistence on disk in the form -// of a JSON file. This allows human operators to manipulate the file. -type JSONPeers struct { - l sync.Mutex - path string - trans Transport -} - -// NewJSONPeers creates a new JSONPeers store. Requires a transport -// to handle the serialization of network addresses. -func NewJSONPeers(base string, trans Transport) *JSONPeers { - path := filepath.Join(base, jsonPeerPath) - store := &JSONPeers{ - path: path, - trans: trans, - } - return store -} - -// Peers implements the PeerStore interface. -func (j *JSONPeers) Peers() ([]string, error) { - j.l.Lock() - defer j.l.Unlock() - - // Read the file - buf, err := ioutil.ReadFile(j.path) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - - // Check for no peers - if len(buf) == 0 { - return nil, nil - } - - // Decode the peers - var peerSet []string - dec := json.NewDecoder(bytes.NewReader(buf)) - if err := dec.Decode(&peerSet); err != nil { - return nil, err - } - - // Deserialize each peer - var peers []string - for _, p := range peerSet { - peers = append(peers, j.trans.DecodePeer([]byte(p))) - } - return peers, nil -} - -// SetPeers implements the PeerStore interface. 
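A quick usage sketch of the JSON store before the SetPeers implementation below, assuming the package's in-memory helpers (NewInmemTransport, NewInmemAddr, as exercised by TestJSONPeers later in this diff); error handling is abbreviated:

```go
dir, _ := ioutil.TempDir("", "raft") // holds peers.json
defer os.RemoveAll(dir)

_, trans := NewInmemTransport() // supplies EncodePeer/DecodePeer
store := NewJSONPeers(dir, trans)

// SetPeers encodes each address via the transport and writes peers.json;
// Peers reads the file back and decodes the addresses.
if err := store.SetPeers([]string{NewInmemAddr()}); err != nil {
	// handle write error
}
peers, _ := store.Peers()
_ = peers
```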
-func (j *JSONPeers) SetPeers(peers []string) error { - j.l.Lock() - defer j.l.Unlock() - - // Encode each peer - var peerSet []string - for _, p := range peers { - peerSet = append(peerSet, string(j.trans.EncodePeer(p))) - } - - // Convert to JSON - var buf bytes.Buffer - enc := json.NewEncoder(&buf) - if err := enc.Encode(peerSet); err != nil { - return err - } - - // Write out as JSON - return ioutil.WriteFile(j.path, buf.Bytes(), 0755) -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/peer_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft/peer_test.go deleted file mode 100644 index 1cb1159e2..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/peer_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package raft - -import ( - "io/ioutil" - "os" - "testing" -) - -func TestJSONPeers(t *testing.T) { - // Create a test dir - dir, err := ioutil.TempDir("", "raft") - if err != nil { - t.Fatalf("err: %v", err) - } - defer os.RemoveAll(dir) - - // Create the store - _, trans := NewInmemTransport() - store := NewJSONPeers(dir, trans) - - // Try a read, should get nothing - peers, err := store.Peers() - if err != nil { - t.Fatalf("err: %v", err) - } - if len(peers) != 0 { - t.Fatalf("peers: %v", peers) - } - - // Initialize some peers - newPeers := []string{NewInmemAddr(), NewInmemAddr(), NewInmemAddr()} - if err := store.SetPeers(newPeers); err != nil { - t.Fatalf("err: %v", err) - } - - // Try a read, should get peers - peers, err = store.Peers() - if err != nil { - t.Fatalf("err: %v", err) - } - if len(peers) != 3 { - t.Fatalf("peers: %v", peers) - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/raft.go b/Godeps/_workspace/src/github.com/hashicorp/raft/raft.go deleted file mode 100644 index 2fdc6d796..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/raft.go +++ /dev/null @@ -1,1781 +0,0 @@ -package raft - -import ( - "bytes" - "errors" - "fmt" - "io" - "log" - "os" - "strconv" - "sync" - "time" - - "github.com/armon/go-metrics" -) - -const ( - minCheckInterval = 10 * time.Millisecond -) - -var ( - keyCurrentTerm = []byte("CurrentTerm") - keyLastVoteTerm = []byte("LastVoteTerm") - keyLastVoteCand = []byte("LastVoteCand") - - // ErrLeader is returned when an operation can't be completed on a - // leader node. - ErrLeader = errors.New("node is the leader") - - // ErrNotLeader is returned when an operation can't be completed on a - // follower or candidate node. - ErrNotLeader = errors.New("node is not the leader") - - // ErrLeadershipLost is returned when a leader fails to commit a log entry - // because it's been deposed in the process. - ErrLeadershipLost = errors.New("leadership lost while committing log") - - // ErrRaftShutdown is returned when operations are requested against an - // inactive Raft. - ErrRaftShutdown = errors.New("raft is already shutdown") - - // ErrEnqueueTimeout is returned when a command fails due to a timeout. - ErrEnqueueTimeout = errors.New("timed out enqueuing operation") - - // ErrKnownPeer is returned when trying to add a peer to the configuration - // that already exists. - ErrKnownPeer = errors.New("peer already known") - - // ErrUnknownPeer is returned when trying to remove a peer from the - // configuration that doesn't exist. - ErrUnknownPeer = errors.New("peer is unknown") -) - -// commitTuple is used to send an index that was committed, -// with an optional associated future that should be invoked. 
-type commitTuple struct { - log *Log - future *logFuture -} - -// leaderState is state that is used while we are a leader. -type leaderState struct { - commitCh chan struct{} - inflight *inflight - replState map[string]*followerReplication - notify map[*verifyFuture]struct{} - stepDown chan struct{} -} - -// Raft implements a Raft node. -type Raft struct { - raftState - - // applyCh is used to async send logs to the main thread to - // be committed and applied to the FSM. - applyCh chan *logFuture - - // Configuration provided at Raft initialization - conf *Config - - // FSM is the client state machine to apply commands to - fsm FSM - - // fsmCommitCh is used to trigger async application of logs to the fsm - fsmCommitCh chan commitTuple - - // fsmRestoreCh is used to trigger a restore from snapshot - fsmRestoreCh chan *restoreFuture - - // fsmSnapshotCh is used to trigger a new snapshot being taken - fsmSnapshotCh chan *reqSnapshotFuture - - // lastContact is the last time we had contact from the - // leader node. This can be used to gauge staleness. - lastContact time.Time - lastContactLock sync.RWMutex - - // Leader is the current cluster leader - leader string - leaderLock sync.RWMutex - - // leaderCh is used to notify of leadership changes - leaderCh chan bool - - // leaderState used only while state is leader - leaderState leaderState - - // Stores our local addr - localAddr string - - // Used for our logging - logger *log.Logger - - // LogStore provides durable storage for logs - logs LogStore - - // Track our known peers - peerCh chan *peerFuture - peers []string - peerStore PeerStore - - // RPC chan comes from the transport layer - rpcCh <-chan RPC - - // Shutdown channel to exit, protected to prevent concurrent exits - shutdown bool - shutdownCh chan struct{} - shutdownLock sync.Mutex - - // snapshots is used to store and retrieve snapshots - snapshots SnapshotStore - - // snapshotCh is used for user triggered snapshots - snapshotCh chan *snapshotFuture - - // stable is a StableStore implementation for durable state - // It provides stable storage for many fields in raftState - stable StableStore - - // The transport layer we use - trans Transport - - // verifyCh is used to async send verify futures to the main thread - // to verify we are still the leader - verifyCh chan *verifyFuture -} - -// NewRaft is used to construct a new Raft node. It takes a configuration, as well -// as implementations of various interfaces that are required. If we have any old state, -// such as snapshots, logs, peers, etc, all those will be restored when creating the -// Raft node. 
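Given the fields above, a minimal single-node bootstrap sketch; it assumes the in-memory helpers defined elsewhere in this vendored package (NewInmemStore, NewInmemTransport, NewDiscardSnapshotStore) and a hypothetical myFSM value implementing FSM:

```go
conf := DefaultConfig()
conf.EnableSingleNode = true // allow a lone node to elect itself

store := NewInmemStore()           // serves as both LogStore and StableStore
_, trans := NewInmemTransport()    // in-memory Transport; address unused here
snaps := NewDiscardSnapshotStore() // throwaway snapshots; FileSnapshotStore for durability
peerStore := &StaticPeers{}        // no other peers yet

r, err := NewRaft(conf, myFSM, store, store, snaps, peerStore, trans)
if err != nil {
	log.Fatalf("failed to start raft: %v", err)
}
defer r.Shutdown()
```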
-func NewRaft(conf *Config, fsm FSM, logs LogStore, stable StableStore, snaps SnapshotStore, - peerStore PeerStore, trans Transport) (*Raft, error) { - // Validate the configuration - if err := ValidateConfig(conf); err != nil { - return nil, err - } - - // Ensure we have a LogOutput - var logger *log.Logger - if conf.Logger != nil { - logger = conf.Logger - } else { - if conf.LogOutput == nil { - conf.LogOutput = os.Stderr - } - logger = log.New(conf.LogOutput, "", log.LstdFlags) - } - - // Try to restore the current term - currentTerm, err := stable.GetUint64(keyCurrentTerm) - if err != nil && err.Error() != "not found" { - return nil, fmt.Errorf("failed to load current term: %v", err) - } - - // Read the last log value - lastIdx, err := logs.LastIndex() - if err != nil { - return nil, fmt.Errorf("failed to find last log: %v", err) - } - - // Get the log - var lastLog Log - if lastIdx > 0 { - if err := logs.GetLog(lastIdx, &lastLog); err != nil { - return nil, fmt.Errorf("failed to get last log: %v", err) - } - } - - // Construct the list of peers that excludes us - localAddr := trans.LocalAddr() - peers, err := peerStore.Peers() - if err != nil { - return nil, fmt.Errorf("failed to get list of peers: %v", err) - } - peers = ExcludePeer(peers, localAddr) - - // Create Raft struct - r := &Raft{ - applyCh: make(chan *logFuture), - conf: conf, - fsm: fsm, - fsmCommitCh: make(chan commitTuple, 128), - fsmRestoreCh: make(chan *restoreFuture), - fsmSnapshotCh: make(chan *reqSnapshotFuture), - leaderCh: make(chan bool), - localAddr: localAddr, - logger: logger, - logs: logs, - peerCh: make(chan *peerFuture), - peers: peers, - peerStore: peerStore, - rpcCh: trans.Consumer(), - snapshots: snaps, - snapshotCh: make(chan *snapshotFuture), - shutdownCh: make(chan struct{}), - stable: stable, - trans: trans, - verifyCh: make(chan *verifyFuture, 64), - } - - // Initialize as a follower - r.setState(Follower) - - // Restore the current term and the last log - r.setCurrentTerm(currentTerm) - r.setLastLogIndex(lastLog.Index) - r.setLastLogTerm(lastLog.Term) - - // Attempt to restore a snapshot if there are any - if err := r.restoreSnapshot(); err != nil { - return nil, err - } - - // Setup a heartbeat fast-path to avoid head-of-line - // blocking where possible. It MUST be safe for this - // to be called concurrently with a blocking RPC. - trans.SetHeartbeatHandler(r.processHeartbeat) - - // Start the background work - r.goFunc(r.run) - r.goFunc(r.runFSM) - r.goFunc(r.runSnapshots) - return r, nil -} - -// Leader is used to return the current leader of the cluster. -// It may return empty string if there is no current leader -// or the leader is unknown. -func (r *Raft) Leader() string { - r.leaderLock.RLock() - leader := r.leader - r.leaderLock.RUnlock() - return leader -} - -// setLeader is used to modify the current leader of the cluster -func (r *Raft) setLeader(leader string) { - r.leaderLock.Lock() - r.leader = leader - r.leaderLock.Unlock() -} - -// Apply is used to apply a command to the FSM in a highly consistent -// manner. This returns a future that can be used to wait on the application. -// An optional timeout can be provided to limit the amount of time we wait -// for the command to be started. This must be run on the leader or it -// will fail. 
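A hedged sketch of the client-facing calls that follow (Apply, Barrier, VerifyLeader), assuming r is a *Raft on the leader node:

```go
// Apply returns a future immediately; Error() blocks until the command
// is committed and applied, or fails (ErrNotLeader, ErrEnqueueTimeout, ...).
f := r.Apply([]byte("set x=1"), 500*time.Millisecond)
if err := f.Error(); err != nil {
	log.Printf("apply failed: %v", err)
} else {
	log.Printf("fsm result: %v", f.Response())
}

// Barrier flushes everything queued ahead of it into the FSM;
// a zero timeout means wait indefinitely.
if err := r.Barrier(0).Error(); err != nil {
	log.Printf("barrier failed: %v", err)
}

// VerifyLeader guards reads against a deposed leader serving stale data.
if err := r.VerifyLeader().Error(); err == nil {
	// still the leader: safe to serve a consistent read
}
```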
-func (r *Raft) Apply(cmd []byte, timeout time.Duration) ApplyFuture { - metrics.IncrCounter([]string{"raft", "apply"}, 1) - var timer <-chan time.Time - if timeout > 0 { - timer = time.After(timeout) - } - - // Create a log future, no index or term yet - logFuture := &logFuture{ - log: Log{ - Type: LogCommand, - Data: cmd, - }, - } - logFuture.init() - - select { - case <-timer: - return errorFuture{ErrEnqueueTimeout} - case <-r.shutdownCh: - return errorFuture{ErrRaftShutdown} - case r.applyCh <- logFuture: - return logFuture - } -} - -// Barrier is used to issue a command that blocks until all preceding - operations have been applied to the FSM. It can be used to ensure the - FSM reflects all queued writes. An optional timeout can be provided to - limit the amount of time we wait for the command to be started. This - must be run on the leader or it will fail. -func (r *Raft) Barrier(timeout time.Duration) Future { - metrics.IncrCounter([]string{"raft", "barrier"}, 1) - var timer <-chan time.Time - if timeout > 0 { - timer = time.After(timeout) - } - - // Create a log future, no index or term yet - logFuture := &logFuture{ - log: Log{ - Type: LogBarrier, - }, - } - logFuture.init() - - select { - case <-timer: - return errorFuture{ErrEnqueueTimeout} - case <-r.shutdownCh: - return errorFuture{ErrRaftShutdown} - case r.applyCh <- logFuture: - return logFuture - } -} - -// VerifyLeader is used to ensure the current node is still - the leader. This can be done to prevent stale reads when a - new leader has potentially been elected. -func (r *Raft) VerifyLeader() Future { - metrics.IncrCounter([]string{"raft", "verify_leader"}, 1) - verifyFuture := &verifyFuture{} - verifyFuture.init() - select { - case <-r.shutdownCh: - return errorFuture{ErrRaftShutdown} - case r.verifyCh <- verifyFuture: - return verifyFuture - } -} - -// AddPeer is used to add a new peer into the cluster. This must be - run on the leader or it will fail. -func (r *Raft) AddPeer(peer string) Future { - logFuture := &logFuture{ - log: Log{ - Type: LogAddPeer, - peer: peer, - }, - } - logFuture.init() - select { - case r.applyCh <- logFuture: - return logFuture - case <-r.shutdownCh: - return errorFuture{ErrRaftShutdown} - } -} - -// RemovePeer is used to remove a peer from the cluster. If the - current leader is being removed, it will cause a new election - to occur. This must be run on the leader or it will fail. -func (r *Raft) RemovePeer(peer string) Future { - logFuture := &logFuture{ - log: Log{ - Type: LogRemovePeer, - peer: peer, - }, - } - logFuture.init() - select { - case r.applyCh <- logFuture: - return logFuture - case <-r.shutdownCh: - return errorFuture{ErrRaftShutdown} - } -} - -// SetPeers is used to forcibly replace the set of internal peers and - the peerstore with the ones specified. This can be considered unsafe. -func (r *Raft) SetPeers(p []string) Future { - peerFuture := &peerFuture{ - peers: p, - } - peerFuture.init() - - select { - case r.peerCh <- peerFuture: - return peerFuture - case <-r.shutdownCh: - return errorFuture{ErrRaftShutdown} - } -} - -// Shutdown is used to stop the Raft background routines. - This is not a graceful operation. Provides a future that - can be used to block until all background routines have exited. 
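A short teardown sketch using the future Shutdown returns below:

```go
// Block until all background goroutines have exited before closing
// any stores or transports the node still references.
if err := r.Shutdown().Error(); err != nil {
	log.Printf("shutdown error: %v", err)
}
```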
-func (r *Raft) Shutdown() Future { - r.shutdownLock.Lock() - defer r.shutdownLock.Unlock() - - if !r.shutdown { - close(r.shutdownCh) - r.shutdown = true - r.setState(Shutdown) - } - - return &shutdownFuture{r} -} - -// Snapshot is used to manually force Raft to take a snapshot. - Returns a future that can be used to block until complete. -func (r *Raft) Snapshot() Future { - snapFuture := &snapshotFuture{} - snapFuture.init() - select { - case r.snapshotCh <- snapFuture: - return snapFuture - case <-r.shutdownCh: - return errorFuture{ErrRaftShutdown} - } -} - -// State is used to return the current raft state. -func (r *Raft) State() RaftState { - return r.getState() -} - -// LeaderCh is used to get a channel which delivers signals on - acquiring or losing leadership. It sends true if we become - the leader, and false if we lose it. The channel is not buffered, - and does not block on writes. -func (r *Raft) LeaderCh() <-chan bool { - return r.leaderCh -} - -func (r *Raft) String() string { - return fmt.Sprintf("Node at %s [%v]", r.localAddr, r.getState()) -} - -// LastContact returns the time of last contact by a leader. - This only makes sense if we are currently a follower. -func (r *Raft) LastContact() time.Time { - r.lastContactLock.RLock() - last := r.lastContact - r.lastContactLock.RUnlock() - return last -} - -// Stats is used to return a map of various internal stats. This should only - be used for informative purposes or debugging. -func (r *Raft) Stats() map[string]string { - toString := func(v uint64) string { - return strconv.FormatUint(v, 10) - } - s := map[string]string{ - "state": r.getState().String(), - "term": toString(r.getCurrentTerm()), - "last_log_index": toString(r.getLastLogIndex()), - "last_log_term": toString(r.getLastLogTerm()), - "commit_index": toString(r.getCommitIndex()), - "applied_index": toString(r.getLastApplied()), - "fsm_pending": toString(uint64(len(r.fsmCommitCh))), - "last_snapshot_index": toString(r.getLastSnapshotIndex()), - "last_snapshot_term": toString(r.getLastSnapshotTerm()), - "num_peers": toString(uint64(len(r.peers))), - } - last := r.LastContact() - if last.IsZero() { - s["last_contact"] = "never" - } else if r.getState() == Leader { - s["last_contact"] = "0" - } else { - s["last_contact"] = fmt.Sprintf("%v", time.Now().Sub(last)) - } - return s -} - -// LastIndex returns the last index in stable storage, - either from the last log or from the last snapshot. -func (r *Raft) LastIndex() uint64 { - return r.getLastIndex() -} - -// AppliedIndex returns the last index applied to the FSM. - This is generally lagging behind the last index, especially - for indexes that are persisted but have not yet been considered - committed by the leader. -func (r *Raft) AppliedIndex() uint64 { - return r.getLastApplied() -} - -// runFSM is a long-running goroutine responsible for applying logs - to the FSM. This is done asynchronously from other operations since we don't want - the FSM to block our internal operations. 
-func (r *Raft) runFSM() { - var lastIndex, lastTerm uint64 - for { - select { - case req := <-r.fsmRestoreCh: - // Open the snapshot - meta, source, err := r.snapshots.Open(req.ID) - if err != nil { - req.respond(fmt.Errorf("failed to open snapshot %v: %v", req.ID, err)) - continue - } - - // Attempt to restore - start := time.Now() - if err := r.fsm.Restore(source); err != nil { - req.respond(fmt.Errorf("failed to restore snapshot %v: %v", req.ID, err)) - source.Close() - continue - } - source.Close() - metrics.MeasureSince([]string{"raft", "fsm", "restore"}, start) - - // Update the last index and term - lastIndex = meta.Index - lastTerm = meta.Term - req.respond(nil) - - case req := <-r.fsmSnapshotCh: - // Get our peers - peers, err := r.peerStore.Peers() - if err != nil { - req.respond(err) - } - - // Start a snapshot - start := time.Now() - snap, err := r.fsm.Snapshot() - metrics.MeasureSince([]string{"raft", "fsm", "snapshot"}, start) - - // Respond to the request - req.index = lastIndex - req.term = lastTerm - req.peers = peers - req.snapshot = snap - req.respond(err) - - case commitTuple := <-r.fsmCommitCh: - // Apply the log if a command - var resp interface{} - if commitTuple.log.Type == LogCommand { - start := time.Now() - resp = r.fsm.Apply(commitTuple.log) - metrics.MeasureSince([]string{"raft", "fsm", "apply"}, start) - } - - // Update the indexes - lastIndex = commitTuple.log.Index - lastTerm = commitTuple.log.Term - - // Invoke the future if given - if commitTuple.future != nil { - commitTuple.future.response = resp - commitTuple.future.respond(nil) - } - case <-r.shutdownCh: - return - } - } -} - -// run is a long running goroutine that runs the Raft FSM. -func (r *Raft) run() { - for { - // Check if we are doing a shutdown - select { - case <-r.shutdownCh: - // Clear the leader to prevent forwarding - r.setLeader("") - return - default: - } - - // Enter into a sub-FSM - switch r.getState() { - case Follower: - r.runFollower() - case Candidate: - r.runCandidate() - case Leader: - r.runLeader() - } - } -} - -// runFollower runs the FSM for a follower. -func (r *Raft) runFollower() { - didWarn := false - r.logger.Printf("[INFO] raft: %v entering Follower state", r) - heartbeatTimer := randomTimeout(r.conf.HeartbeatTimeout) - for { - select { - case rpc := <-r.rpcCh: - r.processRPC(rpc) - - case a := <-r.applyCh: - // Reject any operations since we are not the leader - a.respond(ErrNotLeader) - - case v := <-r.verifyCh: - // Reject any operations since we are not the leader - v.respond(ErrNotLeader) - - case p := <-r.peerCh: - // Set the peers - r.peers = ExcludePeer(p.peers, r.localAddr) - p.respond(r.peerStore.SetPeers(p.peers)) - - case <-heartbeatTimer: - // Restart the heartbeat timer - heartbeatTimer = randomTimeout(r.conf.HeartbeatTimeout) - - // Check if we have had a successful contact - lastContact := r.LastContact() - if time.Now().Sub(lastContact) < r.conf.HeartbeatTimeout { - continue - } - - // Heartbeat failed! Transition to the candidate state - r.setLeader("") - if len(r.peers) == 0 && !r.conf.EnableSingleNode { - if !didWarn { - r.logger.Printf("[WARN] raft: EnableSingleNode disabled, and no known peers. Aborting election.") - didWarn = true - } - } else { - r.logger.Printf("[WARN] raft: Heartbeat timeout reached, starting election") - r.setState(Candidate) - return - } - - case <-r.shutdownCh: - return - } - } -} - -// runCandidate runs the FSM for a candidate. 
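runCandidate tallies votes against quorumSize(), defined further down as ((len(peers)+1)/2)+1, i.e. a strict majority of the full cluster; a self-contained worked sketch of that arithmetic:

```go
package main

import "fmt"

func main() {
	// peers excludes the local node, mirroring r.peers in this file.
	for _, peers := range []int{0, 2, 4} {
		nodes := peers + 1
		votesNeeded := ((peers + 1) / 2) + 1
		fmt.Printf("%d-node cluster: %d vote(s) needed\n", nodes, votesNeeded)
	}
	// Output:
	// 1-node cluster: 1 vote(s) needed
	// 3-node cluster: 2 vote(s) needed
	// 5-node cluster: 3 vote(s) needed
}
```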
-func (r *Raft) runCandidate() { - r.logger.Printf("[INFO] raft: %v entering Candidate state", r) - - // Start vote for us, and set a timeout - voteCh := r.electSelf() - electionTimer := randomTimeout(r.conf.ElectionTimeout) - - // Tally the votes, need a simple majority - grantedVotes := 0 - votesNeeded := r.quorumSize() - r.logger.Printf("[DEBUG] raft: Votes needed: %d", votesNeeded) - - for r.getState() == Candidate { - select { - case rpc := <-r.rpcCh: - r.processRPC(rpc) - - case vote := <-voteCh: - // Check if the term is greater than ours, bail - if vote.Term > r.getCurrentTerm() { - r.logger.Printf("[DEBUG] raft: Newer term discovered, fallback to follower") - r.setState(Follower) - r.setCurrentTerm(vote.Term) - return - } - - // Check if the vote is granted - if vote.Granted { - grantedVotes++ - r.logger.Printf("[DEBUG] raft: Vote granted. Tally: %d", grantedVotes) - } - - // Check if we've become the leader - if grantedVotes >= votesNeeded { - r.logger.Printf("[INFO] raft: Election won. Tally: %d", grantedVotes) - r.setState(Leader) - r.setLeader(r.localAddr) - return - } - - case a := <-r.applyCh: - // Reject any operations since we are not the leader - a.respond(ErrNotLeader) - - case v := <-r.verifyCh: - // Reject any operations since we are not the leader - v.respond(ErrNotLeader) - - case p := <-r.peerCh: - // Set the peers - r.peers = ExcludePeer(p.peers, r.localAddr) - p.respond(r.peerStore.SetPeers(p.peers)) - // Become a follower again - r.setState(Follower) - return - - case <-electionTimer: - // Election failed! Restart the election. We simply return, - // which will kick us back into runCandidate - r.logger.Printf("[WARN] raft: Election timeout reached, restarting election") - return - - case <-r.shutdownCh: - return - } - } -} - -// runLeader runs the FSM for a leader. Do the setup here and drop into -// the leaderLoop for the hot loop. -func (r *Raft) runLeader() { - r.logger.Printf("[INFO] raft: %v entering Leader state", r) - - // Notify that we are the leader - asyncNotifyBool(r.leaderCh, true) - - // Setup leader state - r.leaderState.commitCh = make(chan struct{}, 1) - r.leaderState.inflight = newInflight(r.leaderState.commitCh) - r.leaderState.replState = make(map[string]*followerReplication) - r.leaderState.notify = make(map[*verifyFuture]struct{}) - r.leaderState.stepDown = make(chan struct{}, 1) - - // Cleanup state on step down - defer func() { - // Stop replication - for _, p := range r.leaderState.replState { - close(p.stopCh) - } - - // Cancel inflight requests - r.leaderState.inflight.Cancel(ErrLeadershipLost) - - // Respond to any pending verify requests - for future := range r.leaderState.notify { - future.respond(ErrLeadershipLost) - } - - // Clear all the state - r.leaderState.commitCh = nil - r.leaderState.inflight = nil - r.leaderState.replState = nil - r.leaderState.notify = nil - r.leaderState.stepDown = nil - - // If we are stepping down for some reason, no known leader. - // We may have stepped down due to an RPC call, which would - // provide the leader, so we cannot always blank this out. - r.leaderLock.Lock() - if r.leader == r.localAddr { - r.leader = "" - } - r.leaderLock.Unlock() - - // Notify that we are not the leader - asyncNotifyBool(r.leaderCh, false) - }() - - // Start a replication routine for each peer - for _, peer := range r.peers { - r.startReplication(peer) - } - - // Dispatch a no-op log first. Instead of LogNoop, - // we use a LogAddPeer with our peerset. 
This acts like - a no-op as well, but when doing an initial bootstrap, ensures - that all nodes share a common peerset. - peerSet := append([]string{r.localAddr}, r.peers...) - noop := &logFuture{ - log: Log{ - Type: LogAddPeer, - Data: encodePeers(peerSet, r.trans), - }, - } - r.dispatchLogs([]*logFuture{noop}) - - // Disable EnableSingleNode after we've been elected leader. - This is to prevent a split brain in the future, if we are removed - from the cluster and then elect ourself as leader. - if r.conf.DisableBootstrapAfterElect && r.conf.EnableSingleNode { - r.logger.Printf("[INFO] raft: Disabling EnableSingleNode (bootstrap)") - r.conf.EnableSingleNode = false - } - - // Sit in the leader loop until we step down - r.leaderLoop() -} - -// startReplication is a helper to setup state and start async replication to a peer. -func (r *Raft) startReplication(peer string) { - lastIdx := r.getLastIndex() - s := &followerReplication{ - peer: peer, - inflight: r.leaderState.inflight, - stopCh: make(chan uint64, 1), - triggerCh: make(chan struct{}, 1), - currentTerm: r.getCurrentTerm(), - matchIndex: 0, - nextIndex: lastIdx + 1, - lastContact: time.Now(), - notifyCh: make(chan struct{}, 1), - stepDown: r.leaderState.stepDown, - } - r.leaderState.replState[peer] = s - r.goFunc(func() { r.replicate(s) }) - asyncNotifyCh(s.triggerCh) -} - -// leaderLoop is the hot loop for a leader. It is invoked - after all the various leader setup is done. -func (r *Raft) leaderLoop() { - lease := time.After(r.conf.LeaderLeaseTimeout) - for r.getState() == Leader { - select { - case rpc := <-r.rpcCh: - r.processRPC(rpc) - - case <-r.leaderState.stepDown: - r.setState(Follower) - - case <-r.leaderState.commitCh: - // Get the committed messages - committed := r.leaderState.inflight.Committed() - for e := committed.Front(); e != nil; e = e.Next() { - // Measure the commit time - commitLog := e.Value.(*logFuture) - metrics.MeasureSince([]string{"raft", "commitTime"}, commitLog.dispatch) - - // Increment the commit index - idx := commitLog.log.Index - r.setCommitIndex(idx) - r.processLogs(idx, commitLog) - } - - case v := <-r.verifyCh: - if v.quorumSize == 0 { - // Just dispatched, start the verification - r.verifyLeader(v) - - } else if v.votes < v.quorumSize { - // Early return, means there must be a new leader - r.logger.Printf("[WARN] raft: New leader elected, stepping down") - r.setState(Follower) - delete(r.leaderState.notify, v) - v.respond(ErrNotLeader) - - } else { - // Quorum of members agree, we are still leader - delete(r.leaderState.notify, v) - v.respond(nil) - } - - case p := <-r.peerCh: - p.respond(ErrLeader) - - case newLog := <-r.applyCh: - // Group commit, gather all the ready commits. A bare break would - only exit the select, not the loop, so label the loop. - ready := []*logFuture{newLog} - GATHER: - for i := 0; i < r.conf.MaxAppendEntries; i++ { - select { - case newLog := <-r.applyCh: - ready = append(ready, newLog) - default: - break GATHER - } - } - - // Handle any peer set changes - n := len(ready) - for i := 0; i < n; i++ { - // Special case AddPeer and RemovePeer - log := ready[i] - if log.log.Type != LogAddPeer && log.log.Type != LogRemovePeer { - continue - } - - // Check if this log should be ignored - if !r.preparePeerChange(log) { - ready[i], ready[n-1] = ready[n-1], nil - n-- - i-- - continue - } - - // Apply peer set changes early - r.processLog(&log.log, nil, true) - } - - // Nothing to do if all logs are invalid - if n == 0 { - continue - } - - // Dispatch the logs - ready = ready[:n] - r.dispatchLogs(ready) - - case <-lease: - // Check if we've exceeded the 
lease, potentially stepping down - maxDiff := r.checkLeaderLease() - - // Next check interval should adjust for the last node we've - contacted, without going negative - checkInterval := r.conf.LeaderLeaseTimeout - maxDiff - if checkInterval < minCheckInterval { - checkInterval = minCheckInterval - } - - // Renew the lease timer - lease = time.After(checkInterval) - - case <-r.shutdownCh: - return - } - } -} - -// verifyLeader must be called from the main thread for safety. - Causes the followers to attempt an immediate heartbeat. -func (r *Raft) verifyLeader(v *verifyFuture) { - // Current leader always votes for self - v.votes = 1 - - // Set the quorum size, hot-path for single node - v.quorumSize = r.quorumSize() - if v.quorumSize == 1 { - v.respond(nil) - return - } - - // Track this request - v.notifyCh = r.verifyCh - r.leaderState.notify[v] = struct{}{} - - // Trigger immediate heartbeats - for _, repl := range r.leaderState.replState { - repl.notifyLock.Lock() - repl.notify = append(repl.notify, v) - repl.notifyLock.Unlock() - asyncNotifyCh(repl.notifyCh) - } -} - -// checkLeaderLease is used to check if we can contact a quorum of nodes - within the last leader lease interval. If not, we need to step down, - as we may have lost connectivity. Returns the maximum duration without - contact. -func (r *Raft) checkLeaderLease() time.Duration { - // Track contacted nodes, we can always contact ourself - contacted := 1 - - // Check each follower - var maxDiff time.Duration - now := time.Now() - for peer, f := range r.leaderState.replState { - diff := now.Sub(f.LastContact()) - if diff <= r.conf.LeaderLeaseTimeout { - contacted++ - if diff > maxDiff { - maxDiff = diff - } - } else { - // Log at WARN while the failure is recent, then drop to DEBUG. Otherwise it gets very verbose. - if diff <= 3*r.conf.LeaderLeaseTimeout { - r.logger.Printf("[WARN] raft: Failed to contact %v in %v", peer, diff) - } else { - r.logger.Printf("[DEBUG] raft: Failed to contact %v in %v", peer, diff) - } - } - metrics.AddSample([]string{"raft", "leader", "lastContact"}, float32(diff/time.Millisecond)) - } - - // Verify we can contact a quorum - quorum := r.quorumSize() - if contacted < quorum { - r.logger.Printf("[WARN] raft: Failed to contact quorum of nodes, stepping down") - r.setState(Follower) - } - return maxDiff -} - -// quorumSize is used to return the quorum size. -func (r *Raft) quorumSize() int { - return ((len(r.peers) + 1) / 2) + 1 -} - -// preparePeerChange checks if a LogAddPeer or LogRemovePeer should be performed, - and properly formats the data field on the log before dispatching it. -func (r *Raft) preparePeerChange(l *logFuture) bool { - // Check if this is a known peer - p := l.log.peer - knownPeer := PeerContained(r.peers, p) || r.localAddr == p - - // Ignore known peers on add - if l.log.Type == LogAddPeer && knownPeer { - l.respond(ErrKnownPeer) - return false - } - - // Ignore unknown peers on remove - if l.log.Type == LogRemovePeer && !knownPeer { - l.respond(ErrUnknownPeer) - return false - } - - // Construct the peer set - var peerSet []string - if l.log.Type == LogAddPeer { - peerSet = append([]string{p, r.localAddr}, r.peers...) - } else { - peerSet = ExcludePeer(append([]string{r.localAddr}, r.peers...), p) - } - - // Setup the log - l.log.Data = encodePeers(peerSet, r.trans) - return true -} - -// dispatchLogs is called to push logs to disk, mark them - as inflight and begin replication of them. 
-func (r *Raft) dispatchLogs(applyLogs []*logFuture) { - now := time.Now() - defer metrics.MeasureSince([]string{"raft", "leader", "dispatchLog"}, now) - - term := r.getCurrentTerm() - lastIndex := r.getLastIndex() - logs := make([]*Log, len(applyLogs)) - - for idx, applyLog := range applyLogs { - applyLog.dispatch = now - applyLog.log.Index = lastIndex + uint64(idx) + 1 - applyLog.log.Term = term - applyLog.policy = newMajorityQuorum(len(r.peers) + 1) - logs[idx] = &applyLog.log - } - - // Write the log entry locally - if err := r.logs.StoreLogs(logs); err != nil { - r.logger.Printf("[ERR] raft: Failed to commit logs: %v", err) - for _, applyLog := range applyLogs { - applyLog.respond(err) - } - r.setState(Follower) - return - } - - // Add this to the inflight logs, commit - r.leaderState.inflight.StartAll(applyLogs) - - // Update the last log since it's on disk now - r.setLastLogIndex(lastIndex + uint64(len(applyLogs))) - r.setLastLogTerm(term) - - // Notify the replicators of the new log - for _, f := range r.leaderState.replState { - asyncNotifyCh(f.triggerCh) - } -} - -// processLogs is used to process all the logs from the lastApplied -// up to the given index. -func (r *Raft) processLogs(index uint64, future *logFuture) { - // Reject logs we've applied already - lastApplied := r.getLastApplied() - if index <= lastApplied { - r.logger.Printf("[WARN] raft: Skipping application of old log: %d", index) - return - } - - // Apply all the preceding logs - for idx := r.getLastApplied() + 1; idx <= index; idx++ { - // Get the log, either from the future or from our log store - if future != nil && future.log.Index == idx { - r.processLog(&future.log, future, false) - - } else { - l := new(Log) - if err := r.logs.GetLog(idx, l); err != nil { - r.logger.Printf("[ERR] raft: Failed to get log at %d: %v", idx, err) - panic(err) - } - r.processLog(l, nil, false) - } - - // Update the lastApplied index and term - r.setLastApplied(idx) - } -} - -// processLog is invoked to process the application of a single committed log. 
-func (r *Raft) processLog(l *Log, future *logFuture, precommit bool) { - switch l.Type { - case LogBarrier: - // Barrier is handled by the FSM - fallthrough - - case LogCommand: - // Forward to the fsm handler - select { - case r.fsmCommitCh <- commitTuple{l, future}: - case <-r.shutdownCh: - if future != nil { - future.respond(ErrRaftShutdown) - } - } - - // Return so that the future is only responded to - // by the FSM handler when the application is done - return - - case LogAddPeer: - fallthrough - case LogRemovePeer: - peers := decodePeers(l.Data, r.trans) - r.logger.Printf("[DEBUG] raft: Node %v updated peer set (%v): %v", r.localAddr, l.Type, peers) - - // If the peer set does not include us, remove all other peers - removeSelf := !PeerContained(peers, r.localAddr) && l.Type == LogRemovePeer - if removeSelf { - r.peers = nil - r.peerStore.SetPeers([]string{r.localAddr}) - } else { - r.peers = ExcludePeer(peers, r.localAddr) - r.peerStore.SetPeers(peers) - } - - // Handle replication if we are the leader - if r.getState() == Leader { - for _, p := range r.peers { - if _, ok := r.leaderState.replState[p]; !ok { - r.logger.Printf("[INFO] raft: Added peer %v, starting replication", p) - r.startReplication(p) - } - } - } - - // Stop replication for old nodes - if r.getState() == Leader && !precommit { - var toDelete []string - for _, repl := range r.leaderState.replState { - if !PeerContained(r.peers, repl.peer) { - r.logger.Printf("[INFO] raft: Removed peer %v, stopping replication (Index: %d)", repl.peer, l.Index) - - // Replicate up to this index and stop - repl.stopCh <- l.Index - close(repl.stopCh) - toDelete = append(toDelete, repl.peer) - } - } - for _, name := range toDelete { - delete(r.leaderState.replState, name) - } - } - - // Handle removing ourself - if removeSelf && !precommit { - if r.conf.ShutdownOnRemove { - r.logger.Printf("[INFO] raft: Removed ourself, shutting down") - r.Shutdown() - } else { - r.logger.Printf("[INFO] raft: Removed ourself, transitioning to follower") - r.setState(Follower) - } - } - - case LogNoop: - // Ignore the no-op - default: - r.logger.Printf("[ERR] raft: Got unrecognized log type: %#v", l) - } - - // Invoke the future if given - if future != nil && !precommit { - future.respond(nil) - } -} - -// processRPC is called to handle an incoming RPC request. -func (r *Raft) processRPC(rpc RPC) { - switch cmd := rpc.Command.(type) { - case *AppendEntriesRequest: - r.appendEntries(rpc, cmd) - case *RequestVoteRequest: - r.requestVote(rpc, cmd) - case *InstallSnapshotRequest: - r.installSnapshot(rpc, cmd) - default: - r.logger.Printf("[ERR] raft: Got unexpected command: %#v", rpc.Command) - rpc.Respond(nil, fmt.Errorf("unexpected command")) - } -} - -// processHeartbeat is a special handler used just for heartbeat requests -// so that they can be fast-pathed if a transport supports it. -func (r *Raft) processHeartbeat(rpc RPC) { - defer metrics.MeasureSince([]string{"raft", "rpc", "processHeartbeat"}, time.Now()) - - // Check if we are shutdown, just ignore the RPC - select { - case <-r.shutdownCh: - return - default: - } - - // Ensure we are only handling a heartbeat - switch cmd := rpc.Command.(type) { - case *AppendEntriesRequest: - r.appendEntries(rpc, cmd) - default: - r.logger.Printf("[ERR] raft: Expected heartbeat, got command: %#v", rpc.Command) - rpc.Respond(nil, fmt.Errorf("unexpected command")) - } -} - -// appendEntries is invoked when we get an append entries RPC call. 
-func (r *Raft) appendEntries(rpc RPC, a *AppendEntriesRequest) { - defer metrics.MeasureSince([]string{"raft", "rpc", "appendEntries"}, time.Now()) - // Setup a response - resp := &AppendEntriesResponse{ - Term: r.getCurrentTerm(), - LastLog: r.getLastIndex(), - Success: false, - } - var rpcErr error - defer func() { - rpc.Respond(resp, rpcErr) - }() - - // Ignore an older term - if a.Term < r.getCurrentTerm() { - return - } - - // Increase the term if we see a newer one, also transition to follower - if we ever get an appendEntries call - if a.Term > r.getCurrentTerm() || r.getState() != Follower { - // Ensure transition to follower - r.setState(Follower) - r.setCurrentTerm(a.Term) - resp.Term = a.Term - } - - // Save the current leader - r.setLeader(r.trans.DecodePeer(a.Leader)) - - // Verify the last log entry - if a.PrevLogEntry > 0 { - lastIdx, lastTerm := r.getLastEntry() - - var prevLogTerm uint64 - if a.PrevLogEntry == lastIdx { - prevLogTerm = lastTerm - - } else { - var prevLog Log - if err := r.logs.GetLog(a.PrevLogEntry, &prevLog); err != nil { - r.logger.Printf("[WARN] raft: Failed to get previous log: %d %v (last: %d)", - a.PrevLogEntry, err, lastIdx) - return - } - prevLogTerm = prevLog.Term - } - - if a.PrevLogTerm != prevLogTerm { - r.logger.Printf("[WARN] raft: Previous log term mismatch: ours: %d remote: %d", - prevLogTerm, a.PrevLogTerm) - return - } - } - - // Process any new entries - if n := len(a.Entries); n > 0 { - start := time.Now() - first := a.Entries[0] - last := a.Entries[n-1] - - // Delete any conflicting entries - lastLogIdx := r.getLastLogIndex() - if first.Index <= lastLogIdx { - r.logger.Printf("[WARN] raft: Clearing log suffix from %d to %d", first.Index, lastLogIdx) - if err := r.logs.DeleteRange(first.Index, lastLogIdx); err != nil { - r.logger.Printf("[ERR] raft: Failed to clear log suffix: %v", err) - return - } - } - - // Append the entries - if err := r.logs.StoreLogs(a.Entries); err != nil { - r.logger.Printf("[ERR] raft: Failed to append to logs: %v", err) - return - } - - // Update the lastLog - r.setLastLogIndex(last.Index) - r.setLastLogTerm(last.Term) - metrics.MeasureSince([]string{"raft", "rpc", "appendEntries", "storeLogs"}, start) - } - - // Update the commit index - if a.LeaderCommitIndex > 0 && a.LeaderCommitIndex > r.getCommitIndex() { - start := time.Now() - idx := min(a.LeaderCommitIndex, r.getLastIndex()) - r.setCommitIndex(idx) - r.processLogs(idx, nil) - metrics.MeasureSince([]string{"raft", "rpc", "appendEntries", "processLogs"}, start) - } - - // Everything went well, set success - resp.Success = true - r.lastContactLock.Lock() - r.lastContact = time.Now() - r.lastContactLock.Unlock() - return -} - -// requestVote is invoked when we get a request vote RPC call. 
-func (r *Raft) requestVote(rpc RPC, req *RequestVoteRequest) { - defer metrics.MeasureSince([]string{"raft", "rpc", "requestVote"}, time.Now()) - // Setup a response - resp := &RequestVoteResponse{ - Term: r.getCurrentTerm(), - Peers: encodePeers(r.peers, r.trans), - Granted: false, - } - var rpcErr error - defer func() { - rpc.Respond(resp, rpcErr) - }() - - // Check if we have an existing leader - if leader := r.Leader(); leader != "" { - r.logger.Printf("[WARN] raft: Rejecting vote from %v since we have a leader: %v", - r.trans.DecodePeer(req.Candidate), leader) - return - } - - // Ignore an older term - if req.Term < r.getCurrentTerm() { - return - } - - // Increase the term if we see a newer one - if req.Term > r.getCurrentTerm() { - // Ensure transition to follower - r.setState(Follower) - r.setCurrentTerm(req.Term) - resp.Term = req.Term - } - - // Check if we have voted yet - lastVoteTerm, err := r.stable.GetUint64(keyLastVoteTerm) - if err != nil && err.Error() != "not found" { - r.logger.Printf("[ERR] raft: Failed to get last vote term: %v", err) - return - } - lastVoteCandBytes, err := r.stable.Get(keyLastVoteCand) - if err != nil && err.Error() != "not found" { - r.logger.Printf("[ERR] raft: Failed to get last vote candidate: %v", err) - return - } - - // Check if we've voted in this election before - if lastVoteTerm == req.Term && lastVoteCandBytes != nil { - r.logger.Printf("[INFO] raft: Duplicate RequestVote for same term: %d", req.Term) - if bytes.Compare(lastVoteCandBytes, req.Candidate) == 0 { - r.logger.Printf("[WARN] raft: Duplicate RequestVote from candidate: %s", req.Candidate) - resp.Granted = true - } - return - } - - // Reject if their last log term is older than ours - lastIdx, lastTerm := r.getLastEntry() - if lastTerm > req.LastLogTerm { - r.logger.Printf("[WARN] raft: Rejecting vote from %v since our last term is greater (%d, %d)", - r.trans.DecodePeer(req.Candidate), lastTerm, req.LastLogTerm) - return - } - - if lastIdx > req.LastLogIndex { - r.logger.Printf("[WARN] raft: Rejecting vote from %v since our last index is greater (%d, %d)", - r.trans.DecodePeer(req.Candidate), lastIdx, req.LastLogIndex) - return - } - - // Persist a vote for safety - if err := r.persistVote(req.Term, req.Candidate); err != nil { - r.logger.Printf("[ERR] raft: Failed to persist vote: %v", err) - return - } - - resp.Granted = true - return -} - -// installSnapshot is invoked when we get an InstallSnapshot RPC call. - We must be in the follower state for this, since it means we are - too far behind a leader for log replay. 
-func (r *Raft) installSnapshot(rpc RPC, req *InstallSnapshotRequest) { - defer metrics.MeasureSince([]string{"raft", "rpc", "installSnapshot"}, time.Now()) - // Setup a response - resp := &InstallSnapshotResponse{ - Term: r.getCurrentTerm(), - Success: false, - } - var rpcErr error - defer func() { - rpc.Respond(resp, rpcErr) - }() - - // Ignore an older term - if req.Term < r.getCurrentTerm() { - return - } - - // Increase the term if we see a newer one - if req.Term > r.getCurrentTerm() { - // Ensure transition to follower - r.setState(Follower) - r.setCurrentTerm(req.Term) - resp.Term = req.Term - } - - // Save the current leader - r.setLeader(r.trans.DecodePeer(req.Leader)) - - // Create a new snapshot - sink, err := r.snapshots.Create(req.LastLogIndex, req.LastLogTerm, req.Peers) - if err != nil { - r.logger.Printf("[ERR] raft: Failed to create snapshot to install: %v", err) - rpcErr = fmt.Errorf("failed to create snapshot: %v", err) - return - } - - // Spill the remote snapshot to disk - n, err := io.Copy(sink, rpc.Reader) - if err != nil { - sink.Cancel() - r.logger.Printf("[ERR] raft: Failed to copy snapshot: %v", err) - rpcErr = err - return - } - - // Check that we received it all - if n != req.Size { - sink.Cancel() - r.logger.Printf("[ERR] raft: Failed to receive whole snapshot: %d / %d", n, req.Size) - rpcErr = fmt.Errorf("short read") - return - } - - // Finalize the snapshot - if err := sink.Close(); err != nil { - r.logger.Printf("[ERR] raft: Failed to finalize snapshot: %v", err) - rpcErr = err - return - } - r.logger.Printf("[INFO] raft: Copied %d bytes to local snapshot", n) - - // Restore snapshot - future := &restoreFuture{ID: sink.ID()} - future.init() - select { - case r.fsmRestoreCh <- future: - case <-r.shutdownCh: - future.respond(ErrRaftShutdown) - return - } - - // Wait for the restore to happen - if err := future.Error(); err != nil { - r.logger.Printf("[ERR] raft: Failed to restore snapshot: %v", err) - rpcErr = err - return - } - - // Update the lastApplied so we don't replay old logs - r.setLastApplied(req.LastLogIndex) - - // Update the last stable snapshot info - r.setLastSnapshotIndex(req.LastLogIndex) - r.setLastSnapshotTerm(req.LastLogTerm) - - // Restore the peer set - peers := decodePeers(req.Peers, r.trans) - r.peers = ExcludePeer(peers, r.localAddr) - r.peerStore.SetPeers(peers) - - // Compact logs, continue even if this fails - if err := r.compactLogs(req.LastLogIndex); err != nil { - r.logger.Printf("[ERR] raft: Failed to compact logs: %v", err) - } - - r.logger.Printf("[INFO] raft: Installed remote snapshot") - resp.Success = true - r.lastContactLock.Lock() - r.lastContact = time.Now() - r.lastContactLock.Unlock() - return -} - -// electSelf is used to send a RequestVote RPC to all peers, - and vote for ourself. This has the side effect of incrementing - the current term. The response channel returned is used to wait - for all the responses (including a vote for ourself). 
-func (r *Raft) electSelf() <-chan *RequestVoteResponse { - // Create a response channel - respCh := make(chan *RequestVoteResponse, len(r.peers)+1) - - // Increment the term - r.setCurrentTerm(r.getCurrentTerm() + 1) - - // Construct the request - lastIdx, lastTerm := r.getLastEntry() - req := &RequestVoteRequest{ - Term: r.getCurrentTerm(), - Candidate: r.trans.EncodePeer(r.localAddr), - LastLogIndex: lastIdx, - LastLogTerm: lastTerm, - } - - // Construct a function to ask for a vote - askPeer := func(peer string) { - r.goFunc(func() { - defer metrics.MeasureSince([]string{"raft", "candidate", "electSelf"}, time.Now()) - resp := new(RequestVoteResponse) - err := r.trans.RequestVote(peer, req, resp) - if err != nil { - r.logger.Printf("[ERR] raft: Failed to make RequestVote RPC to %v: %v", peer, err) - resp.Term = req.Term - resp.Granted = false - } - - // If we are not a peer, we could have been removed but failed - to receive the log message. Or it could mean an improperly configured - cluster. Either way, we should warn - if err == nil { - peerSet := decodePeers(resp.Peers, r.trans) - if !PeerContained(peerSet, r.localAddr) { - r.logger.Printf("[WARN] raft: Remote peer %v does not have local node %v as a peer", - peer, r.localAddr) - } - } - - respCh <- resp - }) - } - - // For each peer, request a vote - for _, peer := range r.peers { - askPeer(peer) - } - - // Persist a vote for ourselves - if err := r.persistVote(req.Term, req.Candidate); err != nil { - r.logger.Printf("[ERR] raft: Failed to persist vote: %v", err) - return nil - } - - // Include our own vote - respCh <- &RequestVoteResponse{ - Term: req.Term, - Granted: true, - } - return respCh -} - -// persistVote is used to persist our vote for safety. -func (r *Raft) persistVote(term uint64, candidate []byte) error { - if err := r.stable.SetUint64(keyLastVoteTerm, term); err != nil { - return err - } - if err := r.stable.Set(keyLastVoteCand, candidate); err != nil { - return err - } - return nil -} - -// setCurrentTerm is used to set the current term in a durable manner. -func (r *Raft) setCurrentTerm(t uint64) { - // Persist to disk first - if err := r.stable.SetUint64(keyCurrentTerm, t); err != nil { - panic(fmt.Errorf("failed to save current term: %v", err)) - } - r.raftState.setCurrentTerm(t) -} - -// setState is used to update the current state. Any state - transition causes the known leader to be cleared. This means - that leader should be set only after updating the state. -func (r *Raft) setState(state RaftState) { - r.setLeader("") - r.raftState.setState(state) -} - -// runSnapshots is a long-running goroutine used to manage taking - new snapshots of the FSM. It runs in parallel to the FSM and - main goroutines, so that snapshots do not block normal operation. -func (r *Raft) runSnapshots() { - for { - select { - case <-randomTimeout(r.conf.SnapshotInterval): - // Check if we should snapshot - if !r.shouldSnapshot() { - continue - } - - // Trigger a snapshot - if err := r.takeSnapshot(); err != nil { - r.logger.Printf("[ERR] raft: Failed to take snapshot: %v", err) - } - - case future := <-r.snapshotCh: - // User-triggered, run immediately - err := r.takeSnapshot() - if err != nil { - r.logger.Printf("[ERR] raft: Failed to take snapshot: %v", err) - } - future.respond(err) - - case <-r.shutdownCh: - return - } - } -} - -// shouldSnapshot checks if we meet the conditions to take - a new snapshot. 
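The snapshot path below is driven by three Config knobs; a hedged tuning sketch (values are illustrative, not recommendations):

```go
conf := DefaultConfig()
conf.SnapshotInterval = 120 * time.Second // how often runSnapshots wakes to check
conf.SnapshotThreshold = 8192             // minimum new log entries before snapshotting
conf.TrailingLogs = 10240                 // entries kept after compactLogs trims
```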
-func (r *Raft) shouldSnapshot() bool { - // Check the last snapshot index - lastSnap := r.getLastSnapshotIndex() - - // Check the last log index - lastIdx, err := r.logs.LastIndex() - if err != nil { - r.logger.Printf("[ERR] raft: Failed to get last log index: %v", err) - return false - } - - // Compare the delta to the threshold - delta := lastIdx - lastSnap - return delta >= r.conf.SnapshotThreshold -} - -// takeSnapshot is used to take a new snapshot. -func (r *Raft) takeSnapshot() error { - defer metrics.MeasureSince([]string{"raft", "snapshot", "takeSnapshot"}, time.Now()) - // Create a snapshot request - req := &reqSnapshotFuture{} - req.init() - - // Wait for dispatch or shutdown - select { - case r.fsmSnapshotCh <- req: - case <-r.shutdownCh: - return ErrRaftShutdown - } - - // Wait until we get a response - if err := req.Error(); err != nil { - return fmt.Errorf("failed to start snapshot: %v", err) - } - defer req.snapshot.Release() - - // Log that we are starting the snapshot - r.logger.Printf("[INFO] raft: Starting snapshot up to %d", req.index) - - // Encode the peerset - peerSet := encodePeers(req.peers, r.trans) - - // Create a new snapshot - start := time.Now() - sink, err := r.snapshots.Create(req.index, req.term, peerSet) - if err != nil { - return fmt.Errorf("failed to create snapshot: %v", err) - } - metrics.MeasureSince([]string{"raft", "snapshot", "create"}, start) - - // Try to persist the snapshot - start = time.Now() - if err := req.snapshot.Persist(sink); err != nil { - sink.Cancel() - return fmt.Errorf("failed to persist snapshot: %v", err) - } - metrics.MeasureSince([]string{"raft", "snapshot", "persist"}, start) - - // Close and check for error - if err := sink.Close(); err != nil { - return fmt.Errorf("failed to close snapshot: %v", err) - } - - // Update the last stable snapshot info - r.setLastSnapshotIndex(req.index) - r.setLastSnapshotTerm(req.term) - - // Compact the logs - if err := r.compactLogs(req.index); err != nil { - return err - } - - // Log completion - r.logger.Printf("[INFO] raft: Snapshot to %d complete", req.index) - return nil -} - -// compactLogs takes the last inclusive index of a snapshot - and trims the logs that are no longer needed. -func (r *Raft) compactLogs(snapIdx uint64) error { - defer metrics.MeasureSince([]string{"raft", "compactLogs"}, time.Now()) - // Determine log ranges to compact - minLog, err := r.logs.FirstIndex() - if err != nil { - return fmt.Errorf("failed to get first log index: %v", err) - } - - // Check if we have enough logs to truncate - if r.getLastLogIndex() <= r.conf.TrailingLogs { - return nil - } - - // Truncate up to the end of the snapshot, or `TrailingLogs` - back from the head, whichever is further back. This ensures - at least `TrailingLogs` entries, but does not allow logs - after the snapshot to be removed. - maxLog := min(snapIdx, r.getLastLogIndex()-r.conf.TrailingLogs) - - // Log this - r.logger.Printf("[INFO] raft: Compacting logs from %d to %d", minLog, maxLog) - - // Compact the logs - if err := r.logs.DeleteRange(minLog, maxLog); err != nil { - return fmt.Errorf("log compaction failed: %v", err) - } - return nil -} - -// restoreSnapshot attempts to restore the latest snapshots, and fails - if none of them can be restored. This is called at initialization time, - and is completely unsafe to call at any other time. 
-func (r *Raft) restoreSnapshot() error { - snapshots, err := r.snapshots.List() - if err != nil { - r.logger.Printf("[ERR] raft: Failed to list snapshots: %v", err) - return err - } - - // Try to load in order of newest to oldest - for _, snapshot := range snapshots { - _, source, err := r.snapshots.Open(snapshot.ID) - if err != nil { - r.logger.Printf("[ERR] raft: Failed to open snapshot %v: %v", snapshot.ID, err) - continue - } - defer source.Close() // deferred closes run when restoreSnapshot returns - - if err := r.fsm.Restore(source); err != nil { - r.logger.Printf("[ERR] raft: Failed to restore snapshot %v: %v", snapshot.ID, err) - continue - } - - // Log success - r.logger.Printf("[INFO] raft: Restored from snapshot %v", snapshot.ID) - - // Update the lastApplied so we don't replay old logs - r.setLastApplied(snapshot.Index) - - // Update the last stable snapshot info - r.setLastSnapshotIndex(snapshot.Index) - r.setLastSnapshotTerm(snapshot.Term) - - // Success! - return nil - } - - // If we had snapshots and failed to load them, it's an error - if len(snapshots) > 0 { - return fmt.Errorf("failed to load any existing snapshots") - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/raft_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft/raft_test.go deleted file mode 100644 index 284a5dd0e..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/raft_test.go +++ /dev/null @@ -1,1454 +0,0 @@ -package raft - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "reflect" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/hashicorp/go-msgpack/codec" -) - -// MockFSM is an implementation of the FSM interface, and just stores - the logs sequentially. -type MockFSM struct { - sync.Mutex - logs [][]byte -} - -type MockSnapshot struct { - logs [][]byte - maxIndex int -} - -func (m *MockFSM) Apply(log *Log) interface{} { - m.Lock() - defer m.Unlock() - m.logs = append(m.logs, log.Data) - return len(m.logs) -} - -func (m *MockFSM) Snapshot() (FSMSnapshot, error) { - m.Lock() - defer m.Unlock() - return &MockSnapshot{m.logs, len(m.logs)}, nil -} - -func (m *MockFSM) Restore(inp io.ReadCloser) error { - m.Lock() - defer m.Unlock() - defer inp.Close() - hd := codec.MsgpackHandle{} - dec := codec.NewDecoder(inp, &hd) - - m.logs = nil - return dec.Decode(&m.logs) -} - -func (m *MockSnapshot) Persist(sink SnapshotSink) error { - hd := codec.MsgpackHandle{} - enc := codec.NewEncoder(sink, &hd) - if err := enc.Encode(m.logs[:m.maxIndex]); err != nil { - sink.Cancel() - return err - } - sink.Close() - return nil -} - -func (m *MockSnapshot) Release() { -} - -// Return configurations optimized for in-memory use -func inmemConfig() *Config { - conf := DefaultConfig() - conf.HeartbeatTimeout = 50 * time.Millisecond - conf.ElectionTimeout = 50 * time.Millisecond - conf.LeaderLeaseTimeout = 50 * time.Millisecond - conf.CommitTimeout = time.Millisecond - return conf -} - -type cluster struct { - dirs []string - stores []*InmemStore - fsms []*MockFSM - snaps []*FileSnapshotStore - trans []*InmemTransport - rafts []*Raft -} - -func (c *cluster) Merge(other *cluster) { - c.dirs = append(c.dirs, other.dirs...) - c.stores = append(c.stores, other.stores...) - c.fsms = append(c.fsms, other.fsms...) - c.snaps = append(c.snaps, other.snaps...) - c.trans = append(c.trans, other.trans...) - c.rafts = append(c.rafts, other.rafts...) 
-} - -func (c *cluster) Close() { - var futures []Future - for _, r := range c.rafts { - futures = append(futures, r.Shutdown()) - } - - // Wait for shutdown - timer := time.AfterFunc(200*time.Millisecond, func() { - panic("timed out waiting for shutdown") - }) - - for _, f := range futures { - if err := f.Error(); err != nil { - panic(fmt.Errorf("shutdown future err: %v", err)) - } - } - timer.Stop() - - for _, d := range c.dirs { - os.RemoveAll(d) - } -} - -func (c *cluster) GetInState(s RaftState) []*Raft { - in := make([]*Raft, 0, 1) - for _, r := range c.rafts { - if r.State() == s { - in = append(in, r) - } - } - return in -} - -func (c *cluster) Leader() *Raft { - timeout := time.AfterFunc(400*time.Millisecond, func() { - panic("timeout waiting for leader") - }) - defer timeout.Stop() - - for len(c.GetInState(Leader)) < 1 { - time.Sleep(time.Millisecond) - } - leaders := c.GetInState(Leader) - if len(leaders) != 1 { - panic(fmt.Errorf("expected one leader: %v", leaders)) - } - return leaders[0] -} - -func (c *cluster) FullyConnect() { - log.Printf("[WARN] Fully Connecting") - for i, t1 := range c.trans { - for j, t2 := range c.trans { - if i != j { - t1.Connect(t2.LocalAddr(), t2) - t2.Connect(t1.LocalAddr(), t1) - } - } - } -} - -func (c *cluster) Disconnect(a string) { - log.Printf("[WARN] Disconnecting %v", a) - for _, t := range c.trans { - if t.localAddr == a { - t.DisconnectAll() - } else { - t.Disconnect(a) - } - } -} - -func (c *cluster) EnsureLeader(t *testing.T, expect string) { - limit := time.Now().Add(400 * time.Millisecond) -CHECK: - for _, r := range c.rafts { - leader := r.Leader() - if expect == "" { - if leader != "" { - if time.Now().After(limit) { - t.Fatalf("leader %v expected nil", leader) - } else { - goto WAIT - } - } - } else { - if leader == "" || leader != expect { - if time.Now().After(limit) { - t.Fatalf("leader %v expected %v", leader, expect) - } else { - goto WAIT - } - } - } - } - - return -WAIT: - time.Sleep(10 * time.Millisecond) - goto CHECK -} - -func (c *cluster) EnsureSame(t *testing.T) { - limit := time.Now().Add(400 * time.Millisecond) - first := c.fsms[0] - -CHECK: - first.Lock() - for i, fsm := range c.fsms { - if i == 0 { - continue - } - fsm.Lock() - - if len(first.logs) != len(fsm.logs) { - fsm.Unlock() - if time.Now().After(limit) { - t.Fatalf("length mismatch: %d %d", - len(first.logs), len(fsm.logs)) - } else { - goto WAIT - } - } - - for idx := 0; idx < len(first.logs); idx++ { - if bytes.Compare(first.logs[idx], fsm.logs[idx]) != 0 { - fsm.Unlock() - if time.Now().After(limit) { - t.Fatalf("log mismatch at index %d", idx) - } else { - goto WAIT - } - } - } - fsm.Unlock() - } - - first.Unlock() - return - -WAIT: - first.Unlock() - time.Sleep(20 * time.Millisecond) - goto CHECK -} - -func raftToPeerSet(r *Raft) map[string]struct{} { - peers := make(map[string]struct{}) - peers[r.localAddr] = struct{}{} - - raftPeers, _ := r.peerStore.Peers() - for _, p := range raftPeers { - peers[p] = struct{}{} - } - return peers -} - -func (c *cluster) EnsureSamePeers(t *testing.T) { - limit := time.Now().Add(400 * time.Millisecond) - peerSet := raftToPeerSet(c.rafts[0]) - -CHECK: - for i, raft := range c.rafts { - if i == 0 { - continue - } - - otherSet := raftToPeerSet(raft) - if !reflect.DeepEqual(peerSet, otherSet) { - if time.Now().After(limit) { - t.Fatalf("peer mismatch: %v %v", peerSet, otherSet) - } else { - goto WAIT - } - } - } - return - -WAIT: - time.Sleep(20 * time.Millisecond) - goto CHECK -} - -func MakeCluster(n int, t *testing.T, 
conf *Config) *cluster { - c := &cluster{} - peers := make([]string, 0, n) - - // Setup the stores and transports - for i := 0; i < n; i++ { - dir, err := ioutil.TempDir("", "raft") - if err != nil { - t.Fatalf("err: %v ", err) - } - store := NewInmemStore() - c.dirs = append(c.dirs, dir) - c.stores = append(c.stores, store) - c.fsms = append(c.fsms, &MockFSM{}) - - dir2, snap := FileSnapTest(t) - c.dirs = append(c.dirs, dir2) - c.snaps = append(c.snaps, snap) - - addr, trans := NewInmemTransport() - c.trans = append(c.trans, trans) - peers = append(peers, addr) - } - - // Wire the transports together - c.FullyConnect() - - // Create all the rafts - for i := 0; i < n; i++ { - if conf == nil { - conf = inmemConfig() - } - if n == 1 { - conf.EnableSingleNode = true - } - - logs := c.stores[i] - store := c.stores[i] - snap := c.snaps[i] - trans := c.trans[i] - peerStore := &StaticPeers{StaticPeers: peers} - - raft, err := NewRaft(conf, c.fsms[i], logs, store, snap, peerStore, trans) - if err != nil { - t.Fatalf("err: %v", err) - } - c.rafts = append(c.rafts, raft) - } - - return c -} - -func MakeClusterNoPeers(n int, t *testing.T, conf *Config) *cluster { - c := &cluster{} - - // Setup the stores and transports - for i := 0; i < n; i++ { - dir, err := ioutil.TempDir("", "raft") - if err != nil { - t.Fatalf("err: %v ", err) - } - store := NewInmemStore() - c.dirs = append(c.dirs, dir) - c.stores = append(c.stores, store) - c.fsms = append(c.fsms, &MockFSM{}) - - dir2, snap := FileSnapTest(t) - c.dirs = append(c.dirs, dir2) - c.snaps = append(c.snaps, snap) - - _, trans := NewInmemTransport() - c.trans = append(c.trans, trans) - } - - // Wire the transports together - c.FullyConnect() - - // Create all the rafts - for i := 0; i < n; i++ { - if conf == nil { - conf = inmemConfig() - } - - logs := c.stores[i] - store := c.stores[i] - snap := c.snaps[i] - trans := c.trans[i] - peerStore := &StaticPeers{} - - raft, err := NewRaft(conf, c.fsms[i], logs, store, snap, peerStore, trans) - if err != nil { - t.Fatalf("err: %v", err) - } - c.rafts = append(c.rafts, raft) - } - - return c -} - -func TestRaft_StartStop(t *testing.T) { - c := MakeCluster(1, t, nil) - c.Close() -} - -func TestRaft_AfterShutdown(t *testing.T) { - c := MakeCluster(1, t, nil) - c.Close() - raft := c.rafts[0] - - // Everything should fail now - if f := raft.Apply(nil, 0); f.Error() != ErrRaftShutdown { - t.Fatalf("should be shutdown: %v", f.Error()) - } - if f := raft.AddPeer(NewInmemAddr()); f.Error() != ErrRaftShutdown { - t.Fatalf("should be shutdown: %v", f.Error()) - } - if f := raft.RemovePeer(NewInmemAddr()); f.Error() != ErrRaftShutdown { - t.Fatalf("should be shutdown: %v", f.Error()) - } - if f := raft.Snapshot(); f.Error() != ErrRaftShutdown { - t.Fatalf("should be shutdown: %v", f.Error()) - } - - // Should be idempotent - raft.Shutdown() -} - -func TestRaft_SingleNode(t *testing.T) { - conf := inmemConfig() - c := MakeCluster(1, t, conf) - defer c.Close() - raft := c.rafts[0] - - // Watch leaderCh for change - select { - case v := <-raft.LeaderCh(): - if !v { - t.Fatalf("should become leader") - } - case <-time.After(conf.HeartbeatTimeout * 3): - t.Fatalf("timeout becoming leader") - } - - // Should be leader - if s := raft.State(); s != Leader { - t.Fatalf("expected leader: %v", s) - } - - // Should be able to apply - future := raft.Apply([]byte("test"), time.Millisecond) - if err := future.Error(); err != nil { - t.Fatalf("err: %v", err) - } - - // Check the response - if future.Response().(int) != 1 { - 
t.Fatalf("bad response: %v", future.Response()) - } - - // Check the index - if idx := future.Index(); idx == 0 { - t.Fatalf("bad index: %d", idx) - } - - // Check that it is applied to the FSM - if len(c.fsms[0].logs) != 1 { - t.Fatalf("did not apply to FSM!") - } -} - -func TestRaft_TripleNode(t *testing.T) { - // Make the cluster - c := MakeCluster(3, t, nil) - defer c.Close() - - // Should be one leader - leader := c.Leader() - c.EnsureLeader(t, leader.localAddr) - - // Should be able to apply - future := leader.Apply([]byte("test"), time.Millisecond) - if err := future.Error(); err != nil { - t.Fatalf("err: %v", err) - } - - // Wait for replication - time.Sleep(30 * time.Millisecond) - - // Check that it is applied to the FSM - for _, fsm := range c.fsms { - fsm.Lock() - num := len(fsm.logs) - fsm.Unlock() - if num != 1 { - t.Fatalf("did not apply to FSM!") - } - } -} - -func TestRaft_LeaderFail(t *testing.T) { - // Make the cluster - c := MakeCluster(3, t, nil) - defer c.Close() - - // Should be one leader - leader := c.Leader() - - // Should be able to apply - future := leader.Apply([]byte("test"), time.Millisecond) - if err := future.Error(); err != nil { - t.Fatalf("err: %v", err) - } - - // Wait for replication - time.Sleep(30 * time.Millisecond) - - // Disconnect the leader now - log.Printf("[INFO] Disconnecting %v", leader) - c.Disconnect(leader.localAddr) - - // Wait for new leader - limit := time.Now().Add(200 * time.Millisecond) - var newLead *Raft - for time.Now().Before(limit) && newLead == nil { - time.Sleep(10 * time.Millisecond) - leaders := c.GetInState(Leader) - if len(leaders) == 1 && leaders[0] != leader { - newLead = leaders[0] - } - } - if newLead == nil { - t.Fatalf("expected new leader") - } - - // Ensure the term is greater - if newLead.getCurrentTerm() <= leader.getCurrentTerm() { - t.Fatalf("expected newer term! %d %d", newLead.getCurrentTerm(), leader.getCurrentTerm()) - } - - // Apply should work not work on old leader - future1 := leader.Apply([]byte("fail"), time.Millisecond) - - // Apply should work on newer leader - future2 := newLead.Apply([]byte("apply"), time.Millisecond) - - // Future2 should work - if err := future2.Error(); err != nil { - t.Fatalf("err: %v", err) - } - - // Reconnect the networks - log.Printf("[INFO] Reconnecting %v", leader) - c.FullyConnect() - - // Future1 should fail - if err := future1.Error(); err != ErrLeadershipLost && err != ErrNotLeader { - t.Fatalf("err: %v", err) - } - - // Wait for log replication - c.EnsureSame(t) - - // Check two entries are applied to the FSM - for _, fsm := range c.fsms { - fsm.Lock() - if len(fsm.logs) != 2 { - t.Fatalf("did not apply both to FSM! 
%v", fsm.logs) - } - if bytes.Compare(fsm.logs[0], []byte("test")) != 0 { - t.Fatalf("first entry should be 'test'") - } - if bytes.Compare(fsm.logs[1], []byte("apply")) != 0 { - t.Fatalf("second entry should be 'apply'") - } - fsm.Unlock() - } -} - -func TestRaft_BehindFollower(t *testing.T) { - // Make the cluster - c := MakeCluster(3, t, nil) - defer c.Close() - - // Disconnect one follower - leader := c.Leader() - followers := c.GetInState(Follower) - behind := followers[0] - c.Disconnect(behind.localAddr) - - // Commit a lot of things - var future Future - for i := 0; i < 100; i++ { - future = leader.Apply([]byte(fmt.Sprintf("test%d", i)), 0) - } - - // Wait for the last future to apply - if err := future.Error(); err != nil { - t.Fatalf("err: %v", err) - } else { - log.Printf("[INFO] Finished apply without behind follower") - } - - // Check that we have a non zero last contact - if behind.LastContact().IsZero() { - t.Fatalf("expected previous contact") - } - - // Reconnect the behind node - c.FullyConnect() - - // Ensure all the logs are the same - c.EnsureSame(t) - - // Ensure one leader - leader = c.Leader() - c.EnsureLeader(t, leader.localAddr) -} - -func TestRaft_ApplyNonLeader(t *testing.T) { - // Make the cluster - c := MakeCluster(3, t, nil) - defer c.Close() - - // Wait for a leader - c.Leader() - time.Sleep(10 * time.Millisecond) - - // Try to apply to them - followers := c.GetInState(Follower) - if len(followers) != 2 { - t.Fatalf("Expected 2 followers") - } - follower := followers[0] - - // Try to apply - future := follower.Apply([]byte("test"), time.Millisecond) - - if future.Error() != ErrNotLeader { - t.Fatalf("should not apply on follower") - } - - // Should be cached - if future.Error() != ErrNotLeader { - t.Fatalf("should not apply on follower") - } -} - -func TestRaft_ApplyConcurrent(t *testing.T) { - // Make the cluster - conf := inmemConfig() - conf.HeartbeatTimeout = 80 * time.Millisecond - conf.ElectionTimeout = 80 * time.Millisecond - c := MakeCluster(3, t, conf) - defer c.Close() - - // Wait for a leader - leader := c.Leader() - - // Create a wait group - var group sync.WaitGroup - group.Add(100) - - applyF := func(i int) { - defer group.Done() - future := leader.Apply([]byte(fmt.Sprintf("test%d", i)), 0) - if err := future.Error(); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Concurrently apply - for i := 0; i < 100; i++ { - go applyF(i) - } - - // Wait to finish - doneCh := make(chan struct{}) - go func() { - group.Wait() - close(doneCh) - }() - select { - case <-doneCh: - case <-time.After(time.Second): - t.Fatalf("timeout") - } - - // Check the FSMs - c.EnsureSame(t) -} - -func TestRaft_ApplyConcurrent_Timeout(t *testing.T) { - // Make the cluster - conf := inmemConfig() - conf.HeartbeatTimeout = 80 * time.Millisecond - conf.ElectionTimeout = 80 * time.Millisecond - c := MakeCluster(1, t, conf) - defer c.Close() - - // Wait for a leader - leader := c.Leader() - - // Enough enqueues should cause at least one timeout... 
- var didTimeout int32 = 0 - for i := 0; i < 200; i++ { - go func(i int) { - future := leader.Apply([]byte(fmt.Sprintf("test%d", i)), time.Microsecond) - if future.Error() == ErrEnqueueTimeout { - atomic.StoreInt32(&didTimeout, 1) - } - }(i) - } - - // Wait - time.Sleep(20 * time.Millisecond) - - // Some should have failed - if atomic.LoadInt32(&didTimeout) == 0 { - t.Fatalf("expected a timeout") - } -} - -func TestRaft_JoinNode(t *testing.T) { - // Make a cluster - c := MakeCluster(2, t, nil) - defer c.Close() - - // Apply a log to this cluster to ensure it is 'newer' - var future Future - leader := c.Leader() - future = leader.Apply([]byte("first"), 0) - if err := future.Error(); err != nil { - t.Fatalf("err: %v", err) - } else { - log.Printf("[INFO] Applied log") - } - - // Make a new cluster of 1 - c1 := MakeCluster(1, t, nil) - - // Merge clusters - c.Merge(c1) - c.FullyConnect() - - // Wait until we have 2 leaders - limit := time.Now().Add(200 * time.Millisecond) - var leaders []*Raft - for time.Now().Before(limit) && len(leaders) != 2 { - time.Sleep(10 * time.Millisecond) - leaders = c.GetInState(Leader) - } - if len(leaders) != 2 { - t.Fatalf("expected two leader: %v", leaders) - } - - // Join the new node in - future = leader.AddPeer(c1.rafts[0].localAddr) - if err := future.Error(); err != nil { - t.Fatalf("err: %v", err) - } - - // Wait until we have 2 followers - limit = time.Now().Add(200 * time.Millisecond) - var followers []*Raft - for time.Now().Before(limit) && len(followers) != 2 { - time.Sleep(10 * time.Millisecond) - followers = c.GetInState(Follower) - } - if len(followers) != 2 { - t.Fatalf("expected two followers: %v", followers) - } - - // Check the FSMs - c.EnsureSame(t) - - // Check the peers - c.EnsureSamePeers(t) - - // Ensure one leader - leader = c.Leader() - c.EnsureLeader(t, leader.localAddr) -} - -func TestRaft_RemoveFollower(t *testing.T) { - // Make a cluster - c := MakeCluster(3, t, nil) - defer c.Close() - - // Get the leader - leader := c.Leader() - - // Wait until we have 2 followers - limit := time.Now().Add(200 * time.Millisecond) - var followers []*Raft - for time.Now().Before(limit) && len(followers) != 2 { - time.Sleep(10 * time.Millisecond) - followers = c.GetInState(Follower) - } - if len(followers) != 2 { - t.Fatalf("expected two followers: %v", followers) - } - - // Remove a follower - follower := followers[0] - future := leader.RemovePeer(follower.localAddr) - if err := future.Error(); err != nil { - t.Fatalf("err: %v", err) - } - - // Wait a while - time.Sleep(20 * time.Millisecond) - - // Other nodes should have fewer peers - if peers, _ := leader.peerStore.Peers(); len(peers) != 2 { - t.Fatalf("too many peers") - } - if peers, _ := followers[1].peerStore.Peers(); len(peers) != 2 { - t.Fatalf("too many peers") - } -} - -func TestRaft_RemoveLeader(t *testing.T) { - // Make a cluster - c := MakeCluster(3, t, nil) - defer c.Close() - - // Get the leader - leader := c.Leader() - - // Wait until we have 2 followers - limit := time.Now().Add(200 * time.Millisecond) - var followers []*Raft - for time.Now().Before(limit) && len(followers) != 2 { - time.Sleep(10 * time.Millisecond) - followers = c.GetInState(Follower) - } - if len(followers) != 2 { - t.Fatalf("expected two followers: %v", followers) - } - - // Remove the leader - leader.RemovePeer(leader.localAddr) - - // Wait a while - time.Sleep(20 * time.Millisecond) - - // Should have a new leader - newLeader := c.Leader() - - // Wait a bit for log application - time.Sleep(20 * 
time.Millisecond) - - // Other nodes should have fewer peers - if peers, _ := newLeader.peerStore.Peers(); len(peers) != 2 { - t.Fatalf("too many peers") - } - - // Old leader should be shutdown - if leader.State() != Shutdown { - t.Fatalf("leader should be shutdown") - } - - // Old leader should have no peers - if peers, _ := leader.peerStore.Peers(); len(peers) != 1 { - t.Fatalf("leader should have no peers") - } -} - -func TestRaft_RemoveLeader_NoShutdown(t *testing.T) { - // Make a cluster - conf := inmemConfig() - conf.ShutdownOnRemove = false - c := MakeCluster(3, t, conf) - defer c.Close() - - // Get the leader - leader := c.Leader() - - // Wait until we have 2 followers - limit := time.Now().Add(200 * time.Millisecond) - var followers []*Raft - for time.Now().Before(limit) && len(followers) != 2 { - time.Sleep(10 * time.Millisecond) - followers = c.GetInState(Follower) - } - if len(followers) != 2 { - t.Fatalf("expected two followers: %v", followers) - } - - // Remove the leader - leader.RemovePeer(leader.localAddr) - - // Wait a while - time.Sleep(20 * time.Millisecond) - - // Should have a new leader - newLeader := c.Leader() - - // Wait a bit for log application - time.Sleep(20 * time.Millisecond) - - // Other nodes should have fewer peers - if peers, _ := newLeader.peerStore.Peers(); len(peers) != 2 { - t.Fatalf("too many peers") - } - - // Old leader should be a follower - if leader.State() != Follower { - t.Fatalf("leader should be a follower") - } - - // Old leader should have no peers - if peers, _ := leader.peerStore.Peers(); len(peers) != 1 { - t.Fatalf("leader should have no peers") - } -} - -func TestRaft_RemoveLeader_SplitCluster(t *testing.T) { - // Enable operation after a remove - conf := inmemConfig() - conf.EnableSingleNode = true - conf.ShutdownOnRemove = false - conf.DisableBootstrapAfterElect = false - - // Make a cluster - c := MakeCluster(3, t, conf) - defer c.Close() - - // Get the leader - leader := c.Leader() - - // Remove the leader - leader.RemovePeer(leader.localAddr) - - // Wait until we have 2 leaders - limit := time.Now().Add(200 * time.Millisecond) - var leaders []*Raft - for time.Now().Before(limit) && len(leaders) != 2 { - time.Sleep(10 * time.Millisecond) - leaders = c.GetInState(Leader) - } - if len(leaders) != 2 { - t.Fatalf("expected two leaders: %v", leaders) - } - - // Old leader should have no peers - if len(leader.peers) != 0 { - t.Fatalf("leader should have no peers") - } -} - -func TestRaft_AddKnownPeer(t *testing.T) { - // Make a cluster - c := MakeCluster(3, t, nil) - defer c.Close() - - // Get the leader - leader := c.Leader() - followers := c.GetInState(Follower) - - // Add a follower - future := leader.AddPeer(followers[0].localAddr) - - // Should be already added - if err := future.Error(); err != ErrKnownPeer { - t.Fatalf("err: %v", err) - } -} - -func TestRaft_RemoveUnknownPeer(t *testing.T) { - // Make a cluster - c := MakeCluster(3, t, nil) - defer c.Close() - - // Get the leader - leader := c.Leader() - - // Remove unknown - future := leader.RemovePeer(NewInmemAddr()) - - // Should be an unknown peer - if err := future.Error(); err != ErrUnknownPeer { - t.Fatalf("err: %v", err) - } -} - -func TestRaft_SnapshotRestore(t *testing.T) { - // Make the cluster - conf := inmemConfig() - conf.TrailingLogs = 10 - c := MakeCluster(1, t, conf) - defer c.Close() - - // Commit a lot of things - leader := c.Leader() - var future Future - for i := 0; i < 100; i++ { - future = leader.Apply([]byte(fmt.Sprintf("test%d", i)), 0) - } - - // Wait for 
the last future to apply - if err := future.Error(); err != nil { - t.Fatalf("err: %v", err) - } - - // Take a snapshot - snapFuture := leader.Snapshot() - if err := snapFuture.Error(); err != nil { - t.Fatalf("err: %v", err) - } - - // Check for snapshot - if snaps, _ := leader.snapshots.List(); len(snaps) != 1 { - t.Fatalf("should have a snapshot") - } - - // Logs should be trimmed - if idx, _ := leader.logs.FirstIndex(); idx != 92 { - t.Fatalf("should trim logs to 92: %d", idx) - } - - // Shutdown - shutdown := leader.Shutdown() - if err := shutdown.Error(); err != nil { - t.Fatalf("err: %v", err) - } - - // Restart the Raft - r := leader - r, err := NewRaft(r.conf, r.fsm, r.logs, r.stable, - r.snapshots, r.peerStore, r.trans) - if err != nil { - t.Fatalf("err: %v", err) - } - c.rafts[0] = r - - // We should have restored from the snapshot! - if last := r.getLastApplied(); last != 101 { - t.Fatalf("bad last: %v", last) - } -} - -func TestRaft_SnapshotRestore_PeerChange(t *testing.T) { - // Make the cluster - conf := inmemConfig() - conf.TrailingLogs = 10 - c := MakeCluster(3, t, conf) - defer c.Close() - - // Commit a lot of things - leader := c.Leader() - var future Future - for i := 0; i < 100; i++ { - future = leader.Apply([]byte(fmt.Sprintf("test%d", i)), 0) - } - - // Wait for the last future to apply - if err := future.Error(); err != nil { - t.Fatalf("err: %v", err) - } - - // Take a snapshot - snapFuture := leader.Snapshot() - if err := snapFuture.Error(); err != nil { - t.Fatalf("err: %v", err) - } - - // Shutdown - shutdown := leader.Shutdown() - if err := shutdown.Error(); err != nil { - t.Fatalf("err: %v", err) - } - - // Make a separate cluster - c2 := MakeClusterNoPeers(2, t, conf) - defer c2.Close() - - // Kill the old cluster - for _, sec := range c.rafts { - if sec != leader { - sec.Shutdown() - } - } - - // Change the peer addresses - peers := []string{leader.trans.LocalAddr()} - for _, sec := range c2.rafts { - peers = append(peers, sec.trans.LocalAddr()) - } - - // Restart the Raft with new peers - r := leader - peerStore := &StaticPeers{StaticPeers: peers} - r, err := NewRaft(r.conf, r.fsm, r.logs, r.stable, - r.snapshots, peerStore, r.trans) - if err != nil { - t.Fatalf("err: %v", err) - } - c.rafts[0] = r - c2.rafts = append(c2.rafts, r) - c2.trans = append(c2.trans, r.trans.(*InmemTransport)) - c2.fsms = append(c2.fsms, r.fsm.(*MockFSM)) - c2.FullyConnect() - - // Wait a while - time.Sleep(50 * time.Millisecond) - - // Ensure we elect a leader, and that we replicate - // to our new followers - c2.EnsureSame(t) - - // We should have restored from the snapshot! 
- if last := r.getLastApplied(); last != 102 { - t.Fatalf("bad last: %v", last) - } -} - -func TestRaft_AutoSnapshot(t *testing.T) { - // Make the cluster - conf := inmemConfig() - conf.SnapshotInterval = 5 * time.Millisecond - conf.SnapshotThreshold = 50 - conf.TrailingLogs = 10 - c := MakeCluster(1, t, conf) - defer c.Close() - - // Commit a lot of things - leader := c.Leader() - var future Future - for i := 0; i < 100; i++ { - future = leader.Apply([]byte(fmt.Sprintf("test%d", i)), 0) - } - - // Wait for the last future to apply - if err := future.Error(); err != nil { - t.Fatalf("err: %v", err) - } - - // Wait for a snapshot to happen - time.Sleep(50 * time.Millisecond) - - // Check for snapshot - if snaps, _ := leader.snapshots.List(); len(snaps) == 0 { - t.Fatalf("should have a snapshot") - } -} - -func TestRaft_SendSnapshotFollower(t *testing.T) { - // Make the cluster - conf := inmemConfig() - conf.TrailingLogs = 10 - c := MakeCluster(3, t, conf) - defer c.Close() - - // Disconnect one follower - followers := c.GetInState(Follower) - behind := followers[0] - c.Disconnect(behind.localAddr) - - // Commit a lot of things - leader := c.Leader() - var future Future - for i := 0; i < 100; i++ { - future = leader.Apply([]byte(fmt.Sprintf("test%d", i)), 0) - } - - // Wait for the last future to apply - if err := future.Error(); err != nil { - t.Fatalf("err: %v", err) - } else { - log.Printf("[INFO] Finished apply without behind follower") - } - - // Snapshot, this will truncate logs! - for _, r := range c.rafts { - future = r.Snapshot() - if err := future.Error(); err != nil { - t.Fatalf("err: %v", err) - } - } - - // Reconnect the behind node - c.FullyConnect() - - // Ensure all the logs are the same - c.EnsureSame(t) -} - -func TestRaft_ReJoinFollower(t *testing.T) { - // Enable operation after a remove - conf := inmemConfig() - conf.ShutdownOnRemove = false - - // Make a cluster - c := MakeCluster(3, t, conf) - defer c.Close() - - // Get the leader - leader := c.Leader() - - // Wait until we have 2 followers - limit := time.Now().Add(200 * time.Millisecond) - var followers []*Raft - for time.Now().Before(limit) && len(followers) != 2 { - time.Sleep(10 * time.Millisecond) - followers = c.GetInState(Follower) - } - if len(followers) != 2 { - t.Fatalf("expected two followers: %v", followers) - } - - // Remove a follower - follower := followers[0] - future := leader.RemovePeer(follower.localAddr) - if err := future.Error(); err != nil { - t.Fatalf("err: %v", err) - } - - // Wait a while - time.Sleep(20 * time.Millisecond) - - // Other nodes should have fewer peers - if peers, _ := leader.peerStore.Peers(); len(peers) != 2 { - t.Fatalf("too many peers: %v", peers) - } - if peers, _ := followers[1].peerStore.Peers(); len(peers) != 2 { - t.Fatalf("too many peers: %v", peers) - } - - // Get the leader - time.Sleep(20 * time.Millisecond) - leader = c.Leader() - - // Rejoin. The follower will have a higher term than the leader, - // this will cause the leader to step down, and a new round of elections - // to take place. We should eventually re-stabilize. 
- future = leader.AddPeer(follower.localAddr) - if err := future.Error(); err != nil && err != ErrLeadershipLost { - t.Fatalf("err: %v", err) - } - - // Wait a while - time.Sleep(40 * time.Millisecond) - - // Other nodes should have the full peer set again - if peers, _ := leader.peerStore.Peers(); len(peers) != 3 { - t.Fatalf("missing peers: %v", peers) - } - if peers, _ := followers[1].peerStore.Peers(); len(peers) != 3 { - t.Fatalf("missing peers: %v", peers) - } - - // Should be a follower now - if follower.State() != Follower { - t.Fatalf("bad state: %v", follower.State()) - } -} - -func TestRaft_LeaderLeaseExpire(t *testing.T) { - // Make a cluster - conf := inmemConfig() - c := MakeCluster(2, t, conf) - defer c.Close() - - // Get the leader - leader := c.Leader() - - // Wait until we have a follower - limit := time.Now().Add(200 * time.Millisecond) - var followers []*Raft - for time.Now().Before(limit) && len(followers) != 1 { - time.Sleep(10 * time.Millisecond) - followers = c.GetInState(Follower) - } - if len(followers) != 1 { - t.Fatalf("expected a follower: %v", followers) - } - - // Disconnect the follower now - follower := followers[0] - log.Printf("[INFO] Disconnecting %v", follower) - c.Disconnect(follower.localAddr) - - // Watch the leaderCh - select { - case v := <-leader.LeaderCh(): - if v { - t.Fatalf("should step down as leader") - } - case <-time.After(conf.LeaderLeaseTimeout * 2): - t.Fatalf("timeout stepping down as leader") - } - - // Should be no leaders - if len(c.GetInState(Leader)) != 0 { - t.Fatalf("expected step down") - } - - // Verify no further contact - last := follower.LastContact() - time.Sleep(50 * time.Millisecond) - - // Check that last contact has not changed - if last != follower.LastContact() { - t.Fatalf("unexpected further contact") - } - - // Ensure both have cleared their leader - if l := leader.Leader(); l != "" { - t.Fatalf("bad: %v", l) - } - if l := follower.Leader(); l != "" { - t.Fatalf("bad: %v", l) - } -} - -func TestRaft_Barrier(t *testing.T) { - // Make the cluster - c := MakeCluster(3, t, nil) - defer c.Close() - - // Get the leader - leader := c.Leader() - - // Commit a lot of things - for i := 0; i < 100; i++ { - leader.Apply([]byte(fmt.Sprintf("test%d", i)), 0) - } - - // Wait for a barrier complete - barrier := leader.Barrier(0) - - // Wait for the barrier future to apply - if err := barrier.Error(); err != nil { - t.Fatalf("err: %v", err) - } - - // Ensure all the logs are the same - c.EnsureSame(t) - if len(c.fsms[0].logs) != 100 { - t.Fatalf("Bad log length") - } -} - -func TestRaft_VerifyLeader(t *testing.T) { - // Make the cluster - c := MakeCluster(3, t, nil) - defer c.Close() - - // Get the leader - leader := c.Leader() - - // Verify we are leader - verify := leader.VerifyLeader() - - // Wait for the verify to apply - if err := verify.Error(); err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestRaft_VerifyLeader_Single(t *testing.T) { - // Make the cluster - c := MakeCluster(1, t, nil) - defer c.Close() - - // Get the leader - leader := c.Leader() - - // Verify we are leader - verify := leader.VerifyLeader() - - // Wait for the verify to apply - if err := verify.Error(); err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestRaft_VerifyLeader_Fail(t *testing.T) { - // Make a cluster - conf := inmemConfig() - c := MakeCluster(2, t, conf) - defer c.Close() - - // Get the leader - leader := c.Leader() - - // Wait until we have a follower - limit := time.Now().Add(200 * time.Millisecond) - var followers []*Raft - for 
time.Now().Before(limit) && len(followers) != 1 { - time.Sleep(10 * time.Millisecond) - followers = c.GetInState(Follower) - } - if len(followers) != 1 { - t.Fatalf("expected a followers: %v", followers) - } - - // Force follower to different term - follower := followers[0] - follower.setCurrentTerm(follower.getCurrentTerm() + 1) - - // Verify we are leader - verify := leader.VerifyLeader() - - // Wait for the leader to step down - if err := verify.Error(); err != ErrNotLeader && err != ErrLeadershipLost { - t.Fatalf("err: %v", err) - } - - // Ensure the known leader is cleared - if l := leader.Leader(); l != "" { - t.Fatalf("bad: %v", l) - } -} - -func TestRaft_VerifyLeader_ParitalConnect(t *testing.T) { - // Make a cluster - conf := inmemConfig() - c := MakeCluster(3, t, conf) - defer c.Close() - - // Get the leader - leader := c.Leader() - - // Wait until we have a followers - limit := time.Now().Add(200 * time.Millisecond) - var followers []*Raft - for time.Now().Before(limit) && len(followers) != 2 { - time.Sleep(10 * time.Millisecond) - followers = c.GetInState(Follower) - } - if len(followers) != 2 { - t.Fatalf("expected a followers: %v", followers) - } - - // Force partial disconnect - follower := followers[0] - log.Printf("[INFO] Disconnecting %v", follower) - c.Disconnect(follower.localAddr) - - // Verify we are leader - verify := leader.VerifyLeader() - - // Wait for the leader to step down - if err := verify.Error(); err != nil { - t.Fatalf("err: %v", err) - } -} - -func TestRaft_SettingPeers(t *testing.T) { - // Make the cluster - c := MakeClusterNoPeers(3, t, nil) - defer c.Close() - - peers := make([]string, 0) - for _, v := range c.rafts { - peers = append(peers, v.localAddr) - } - - for _, v := range c.rafts { - future := v.SetPeers(peers) - if err := future.Error(); err != nil { - t.Fatalf("error setting peers: %v", err) - } - } - - // Wait a while - time.Sleep(20 * time.Millisecond) - - // Should have a new leader - if leader := c.Leader(); leader == nil { - t.Fatalf("no leader?") - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/replication.go b/Godeps/_workspace/src/github.com/hashicorp/raft/replication.go deleted file mode 100644 index 30541952d..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/replication.go +++ /dev/null @@ -1,513 +0,0 @@ -package raft - -import ( - "errors" - "fmt" - "sync" - "time" - - "github.com/armon/go-metrics" -) - -const ( - maxFailureScale = 12 - failureWait = 10 * time.Millisecond -) - -var ( - // ErrLogNotFound indicates a given log entry is not available. - ErrLogNotFound = errors.New("log not found") - - // ErrPipelineReplicationNotSupported can be returned by the transport to - // signal that pipeline replication is not supported in general, and that - // no error message should be produced. - ErrPipelineReplicationNotSupported = errors.New("pipeline replication not supported") -) - -type followerReplication struct { - peer string - inflight *inflight - - stopCh chan uint64 - triggerCh chan struct{} - - currentTerm uint64 - matchIndex uint64 - nextIndex uint64 - - lastContact time.Time - lastContactLock sync.RWMutex - - failures uint64 - - notifyCh chan struct{} - notify []*verifyFuture - notifyLock sync.Mutex - - // stepDown is used to indicate to the leader that we - // should step down based on information from a follower. - stepDown chan struct{} - - // allowPipeline is used to control it seems like - // pipeline replication should be enabled. 
- allowPipeline bool -} - -// notifyAll is used to notify all the waiting verify futures -// if the follower believes we are still the leader. -func (s *followerReplication) notifyAll(leader bool) { - // Clear the waiting notifies minimizing lock time - s.notifyLock.Lock() - n := s.notify - s.notify = nil - s.notifyLock.Unlock() - - // Submit our votes - for _, v := range n { - v.vote(leader) - } -} - -// LastContact returns the time of last contact. -func (s *followerReplication) LastContact() time.Time { - s.lastContactLock.RLock() - last := s.lastContact - s.lastContactLock.RUnlock() - return last -} - -// setLastContact sets the last contact to the current time. -func (s *followerReplication) setLastContact() { - s.lastContactLock.Lock() - s.lastContact = time.Now() - s.lastContactLock.Unlock() -} - -// replicate is a long running routine that is used to manage -// the process of replicating logs to our followers. -func (r *Raft) replicate(s *followerReplication) { - // Start an async heartbeat routine - stopHeartbeat := make(chan struct{}) - defer close(stopHeartbeat) - r.goFunc(func() { r.heartbeat(s, stopHeartbeat) }) - -RPC: - shouldStop := false - for !shouldStop { - select { - case maxIndex := <-s.stopCh: - // Make a best effort to replicate up to this index - if maxIndex > 0 { - r.replicateTo(s, maxIndex) - } - return - case <-s.triggerCh: - shouldStop = r.replicateTo(s, r.getLastLogIndex()) - case <-randomTimeout(r.conf.CommitTimeout): - shouldStop = r.replicateTo(s, r.getLastLogIndex()) - } - - // If things look healthy, switch to pipeline mode - if !shouldStop && s.allowPipeline { - goto PIPELINE - } - } - return - -PIPELINE: - // Disable until re-enabled - s.allowPipeline = false - - // Replicates using a pipeline for high performance. This method - // is not able to gracefully recover from errors, and so we fall back - // to standard mode on failure. - if err := r.pipelineReplicate(s); err != nil { - if err != ErrPipelineReplicationNotSupported { - r.logger.Printf("[ERR] raft: Failed to start pipeline replication to %s: %s", s.peer, err) - } - } - goto RPC -} - -// replicateTo is used to replicate the logs up to a given last index. -// If the follower log is behind, we take care to bring them up to date. 
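A note before replicateTo: the replicate loop above wakes either on an explicit trigger or on a randomized commit timeout, so followers are serviced even when no new entries arrive. A standalone sketch of that select pattern; randomTimeout here mirrors the helper defined in util.go later in this diff:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// randomTimeout fires between d and 2d, so peers do not wake in lock step.
func randomTimeout(d time.Duration) <-chan time.Time {
	if d == 0 {
		return nil
	}
	return time.After(d + time.Duration(rand.Int63())%d)
}

func main() {
	trigger := make(chan struct{}, 1)
	trigger <- struct{}{} // simulate a new log entry arriving

	for i := 0; i < 2; i++ {
		select {
		case <-trigger:
			fmt.Println("replicate: woken by trigger channel")
		case <-randomTimeout(10 * time.Millisecond):
			fmt.Println("replicate: woken by commit-timeout tick")
		}
	}
}
```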
-func (r *Raft) replicateTo(s *followerReplication, lastIndex uint64) (shouldStop bool) { - // Create the base request - var req AppendEntriesRequest - var resp AppendEntriesResponse - var start time.Time -START: - // Prevent an excessive retry rate on errors - if s.failures > 0 { - select { - case <-time.After(backoff(failureWait, s.failures, maxFailureScale)): - case <-r.shutdownCh: - } - } - - // Setup the request - if err := r.setupAppendEntries(s, &req, s.nextIndex, lastIndex); err == ErrLogNotFound { - goto SEND_SNAP - } else if err != nil { - return - } - - // Make the RPC call - start = time.Now() - if err := r.trans.AppendEntries(s.peer, &req, &resp); err != nil { - r.logger.Printf("[ERR] raft: Failed to AppendEntries to %v: %v", s.peer, err) - s.failures++ - return - } - appendStats(s.peer, start, float32(len(req.Entries))) - - // Check for a newer term, stop running - if resp.Term > req.Term { - r.handleStaleTerm(s) - return true - } - - // Update the last contact - s.setLastContact() - - // Update s based on success - if resp.Success { - // Update our replication state - updateLastAppended(s, &req) - - // Clear any failures, allow pipelining - s.failures = 0 - s.allowPipeline = true - } else { - s.nextIndex = max(min(s.nextIndex-1, resp.LastLog+1), 1) - s.matchIndex = s.nextIndex - 1 - s.failures++ - r.logger.Printf("[WARN] raft: AppendEntries to %v rejected, sending older logs (next: %d)", s.peer, s.nextIndex) - } - -CHECK_MORE: - // Check if there are more logs to replicate - if s.nextIndex <= lastIndex { - goto START - } - return - - // SEND_SNAP is used when we fail to get a log, usually because the follower - // is too far behind, and we must ship a snapshot down instead -SEND_SNAP: - if stop, err := r.sendLatestSnapshot(s); stop { - return true - } else if err != nil { - r.logger.Printf("[ERR] raft: Failed to send snapshot to %v: %v", s.peer, err) - return - } - - // Check if there is more to replicate - goto CHECK_MORE -} - -// sendLatestSnapshot is used to send the latest snapshot we have -// down to our follower. 
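The rejection branch inside replicateTo above packs three bounds into one expression: step back one index, jump straight to the follower's reported log end, and never go below index 1. The same math in isolation, with hypothetical indexes:

```go
package main

import "fmt"

func min(a, b uint64) uint64 {
	if a <= b {
		return a
	}
	return b
}

func max(a, b uint64) uint64 {
	if a >= b {
		return a
	}
	return b
}

// nextIndexOnReject mirrors the rejection branch of replicateTo above: step
// back toward the follower's reported last log, but never below index 1.
func nextIndexOnReject(nextIndex, followerLastLog uint64) uint64 {
	return max(min(nextIndex-1, followerLastLog+1), 1)
}

func main() {
	fmt.Println(nextIndexOnReject(50, 10)) // 11: jump straight past the follower's log end
	fmt.Println(nextIndexOnReject(1, 0))   // 1: clamped, there is no index zero
}
```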
-func (r *Raft) sendLatestSnapshot(s *followerReplication) (bool, error) { - // Get the snapshots - snapshots, err := r.snapshots.List() - if err != nil { - r.logger.Printf("[ERR] raft: Failed to list snapshots: %v", err) - return false, err - } - - // Check we have at least a single snapshot - if len(snapshots) == 0 { - return false, fmt.Errorf("no snapshots found") - } - - // Open the most recent snapshot - snapID := snapshots[0].ID - meta, snapshot, err := r.snapshots.Open(snapID) - if err != nil { - r.logger.Printf("[ERR] raft: Failed to open snapshot %v: %v", snapID, err) - return false, err - } - defer snapshot.Close() - - // Setup the request - req := InstallSnapshotRequest{ - Term: s.currentTerm, - Leader: r.trans.EncodePeer(r.localAddr), - LastLogIndex: meta.Index, - LastLogTerm: meta.Term, - Peers: meta.Peers, - Size: meta.Size, - } - - // Make the call - start := time.Now() - var resp InstallSnapshotResponse - if err := r.trans.InstallSnapshot(s.peer, &req, &resp, snapshot); err != nil { - r.logger.Printf("[ERR] raft: Failed to install snapshot %v: %v", snapID, err) - s.failures++ - return false, err - } - metrics.MeasureSince([]string{"raft", "replication", "installSnapshot", s.peer}, start) - - // Check for a newer term, stop running - if resp.Term > req.Term { - r.handleStaleTerm(s) - return true, nil - } - - // Update the last contact - s.setLastContact() - - // Check for success - if resp.Success { - // Mark any inflight logs as committed - s.inflight.CommitRange(s.matchIndex+1, meta.Index) - - // Update the indexes - s.matchIndex = meta.Index - s.nextIndex = s.matchIndex + 1 - - // Clear any failures - s.failures = 0 - - // Notify we are still leader - s.notifyAll(true) - } else { - s.failures++ - r.logger.Printf("[WARN] raft: InstallSnapshot to %v rejected", s.peer) - } - return false, nil -} - -// heartbeat is used to periodically invoke AppendEntries on a peer -// to ensure they don't time out. This is done async of replicate(), -// since that routine could potentially be blocked on disk IO. -func (r *Raft) heartbeat(s *followerReplication, stopCh chan struct{}) { - var failures uint64 - req := AppendEntriesRequest{ - Term: s.currentTerm, - Leader: r.trans.EncodePeer(r.localAddr), - } - var resp AppendEntriesResponse - for { - // Wait for the next heartbeat interval or forced notify - select { - case <-s.notifyCh: - case <-randomTimeout(r.conf.HeartbeatTimeout / 10): - case <-stopCh: - return - } - - start := time.Now() - if err := r.trans.AppendEntries(s.peer, &req, &resp); err != nil { - r.logger.Printf("[ERR] raft: Failed to heartbeat to %v: %v", s.peer, err) - failures++ - select { - case <-time.After(backoff(failureWait, failures, maxFailureScale)): - case <-stopCh: - } - } else { - s.setLastContact() - failures = 0 - metrics.MeasureSince([]string{"raft", "replication", "heartbeat", s.peer}, start) - s.notifyAll(resp.Success) - } - } -} - -// pipelineReplicate is used when we have synchronized our state with the follower, -// and want to switch to a higher performance pipeline mode of replication. -// We only pipeline AppendEntries commands, and if we ever hit an error, we fall -// back to the standard replication which can handle more complex situations. 
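pipelineReplicate, whose body follows, splits sending and response handling into two goroutines connected by a bounded stream of in-flight futures. A toy model of that shape, with plain ints standing in for AppendEntries RPCs:

```go
package main

import "fmt"

func main() {
	// A toy stand-in for an AppendEntries pipeline: sends do not wait for the
	// matching response, a separate consumer drains responses in FIFO order,
	// and the bounded channel provides natural back-pressure.
	inflight := make(chan int, 8)
	done := make(chan struct{})

	go func() { // plays the role of pipelineDecode
		defer close(done)
		for idx := range inflight {
			fmt.Println("acked entries up to index", idx)
		}
	}()

	for idx := 1; idx <= 3; idx++ { // plays the role of pipelineSend
		inflight <- idx
	}
	close(inflight)
	<-done
}
```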
-func (r *Raft) pipelineReplicate(s *followerReplication) error { - // Create a new pipeline - pipeline, err := r.trans.AppendEntriesPipeline(s.peer) - if err != nil { - return err - } - defer pipeline.Close() - - // Log start and stop of pipeline - r.logger.Printf("[INFO] raft: pipelining replication to peer %v", s.peer) - defer r.logger.Printf("[INFO] raft: aborting pipeline replication to peer %v", s.peer) - - // Create a shutdown and finish channel - stopCh := make(chan struct{}) - finishCh := make(chan struct{}) - - // Start a dedicated decoder - r.goFunc(func() { r.pipelineDecode(s, pipeline, stopCh, finishCh) }) - - // Start pipeline sends at the last good nextIndex - nextIndex := s.nextIndex - - shouldStop := false -SEND: - for !shouldStop { - select { - case <-finishCh: - break SEND - case maxIndex := <-s.stopCh: - if maxIndex > 0 { - r.pipelineSend(s, pipeline, &nextIndex, maxIndex) - } - break SEND - case <-s.triggerCh: - shouldStop = r.pipelineSend(s, pipeline, &nextIndex, r.getLastLogIndex()) - case <-randomTimeout(r.conf.CommitTimeout): - shouldStop = r.pipelineSend(s, pipeline, &nextIndex, r.getLastLogIndex()) - } - } - - // Stop our decoder, and wait for it to finish - close(stopCh) - select { - case <-finishCh: - case <-r.shutdownCh: - } - return nil -} - -// pipelineSend is used to send data over a pipeline. -func (r *Raft) pipelineSend(s *followerReplication, p AppendPipeline, nextIdx *uint64, lastIndex uint64) (shouldStop bool) { - // Create a new append request - req := new(AppendEntriesRequest) - if err := r.setupAppendEntries(s, req, *nextIdx, lastIndex); err != nil { - return true - } - - // Pipeline the append entries - if _, err := p.AppendEntries(req, new(AppendEntriesResponse)); err != nil { - r.logger.Printf("[ERR] raft: Failed to pipeline AppendEntries to %v: %v", s.peer, err) - return true - } - - // Increase the next send log to avoid re-sending old logs - if n := len(req.Entries); n > 0 { - last := req.Entries[n-1] - *nextIdx = last.Index + 1 - } - return false -} - -// pipelineDecode is used to decode the responses of pipelined requests. -func (r *Raft) pipelineDecode(s *followerReplication, p AppendPipeline, stopCh, finishCh chan struct{}) { - defer close(finishCh) - respCh := p.Consumer() - for { - select { - case ready := <-respCh: - req, resp := ready.Request(), ready.Response() - appendStats(s.peer, ready.Start(), float32(len(req.Entries))) - - // Check for a newer term, stop running - if resp.Term > req.Term { - r.handleStaleTerm(s) - return - } - - // Update the last contact - s.setLastContact() - - // Abort pipeline if not successful - if !resp.Success { - return - } - - // Update our replication state - updateLastAppended(s, req) - case <-stopCh: - return - } - } -} - -// setupAppendEntries is used to setup an append entries request. -func (r *Raft) setupAppendEntries(s *followerReplication, req *AppendEntriesRequest, nextIndex, lastIndex uint64) error { - req.Term = s.currentTerm - req.Leader = r.trans.EncodePeer(r.localAddr) - req.LeaderCommitIndex = r.getCommitIndex() - if err := r.setPreviousLog(req, nextIndex); err != nil { - return err - } - if err := r.setNewLogs(req, nextIndex, lastIndex); err != nil { - return err - } - return nil -} - -// setPreviousLog is used to setup the PrevLogEntry and PrevLogTerm for an -// AppendEntriesRequest given the next index to replicate. 
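setNewLogs, defined just below, caps each RPC at MaxAppendEntries entries. The bound it computes, extracted into a runnable sketch; the batch size of 64 is the library's usual default, assumed here for illustration:

```go
package main

import "fmt"

func min(a, b uint64) uint64 {
	if a <= b {
		return a
	}
	return b
}

// batchEnd mirrors the bound computed in setNewLogs below: each RPC carries
// at most maxAppendEntries entries and never reaches past lastIndex.
func batchEnd(nextIndex, lastIndex uint64, maxAppendEntries int) uint64 {
	return min(nextIndex+uint64(maxAppendEntries)-1, lastIndex)
}

func main() {
	// With entries 100..1099 outstanding and a batch size of 64 (assumed
	// default), the first RPC carries indexes 100..163.
	fmt.Println(batchEnd(100, 1099, 64)) // 163
}
```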
-func (r *Raft) setPreviousLog(req *AppendEntriesRequest, nextIndex uint64) error { - // Guard for the first index, since there is no 0 log entry - // Guard against the previous index being a snapshot as well - if nextIndex == 1 { - req.PrevLogEntry = 0 - req.PrevLogTerm = 0 - - } else if (nextIndex - 1) == r.getLastSnapshotIndex() { - req.PrevLogEntry = r.getLastSnapshotIndex() - req.PrevLogTerm = r.getLastSnapshotTerm() - - } else { - var l Log - if err := r.logs.GetLog(nextIndex-1, &l); err != nil { - r.logger.Printf("[ERR] raft: Failed to get log at index %d: %v", - nextIndex-1, err) - return err - } - - // Set the previous index and term from the fetched log entry - req.PrevLogEntry = l.Index - req.PrevLogTerm = l.Term - } - return nil -} - -// setNewLogs is used to set up the logs which should be appended for a request. -func (r *Raft) setNewLogs(req *AppendEntriesRequest, nextIndex, lastIndex uint64) error { - // Append up to MaxAppendEntries or up to the lastIndex - req.Entries = make([]*Log, 0, r.conf.MaxAppendEntries) - maxIndex := min(nextIndex+uint64(r.conf.MaxAppendEntries)-1, lastIndex) - for i := nextIndex; i <= maxIndex; i++ { - oldLog := new(Log) - if err := r.logs.GetLog(i, oldLog); err != nil { - r.logger.Printf("[ERR] raft: Failed to get log at index %d: %v", i, err) - return err - } - req.Entries = append(req.Entries, oldLog) - } - return nil -} - -// appendStats is used to emit stats about an AppendEntries invocation. -func appendStats(peer string, start time.Time, logs float32) { - metrics.MeasureSince([]string{"raft", "replication", "appendEntries", "rpc", peer}, start) - metrics.IncrCounter([]string{"raft", "replication", "appendEntries", "logs", peer}, logs) -} - -// handleStaleTerm is used when a follower indicates that we have a stale term. -func (r *Raft) handleStaleTerm(s *followerReplication) { - r.logger.Printf("[ERR] raft: peer %v has newer term, stopping replication", s.peer) - s.notifyAll(false) // No longer leader - asyncNotifyCh(s.stepDown) -} - -// updateLastAppended is used to update follower replication state after a successful -// AppendEntries RPC. -func updateLastAppended(s *followerReplication, req *AppendEntriesRequest) { - // Mark any inflight logs as committed - if logs := req.Entries; len(logs) > 0 { - first := logs[0] - last := logs[len(logs)-1] - s.inflight.CommitRange(first.Index, last.Index) - - // Update the indexes - s.matchIndex = last.Index - s.nextIndex = last.Index + 1 - } - - // Notify still leader - s.notifyAll(true) -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/snapshot.go b/Godeps/_workspace/src/github.com/hashicorp/raft/snapshot.go deleted file mode 100644 index 7151f43ce..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/snapshot.go +++ /dev/null @@ -1,40 +0,0 @@ -package raft - -import ( - "io" -) - -// SnapshotMeta is for metadata of a snapshot. -type SnapshotMeta struct { - ID string // ID is opaque to the store, and is used for opening - Index uint64 - Term uint64 - Peers []byte - Size int64 -} - -// SnapshotStore interface is used to allow for flexible implementations -// of snapshot storage and retrieval. For example, a client could implement -// a shared state store such as S3, allowing new nodes to restore snapshots -// without streaming from the leader. -type SnapshotStore interface { - // Create is used to begin a snapshot at a given index and term, - // with the current peer set already encoded. 
- Create(index, term uint64, peers []byte) (SnapshotSink, error) - - // List is used to list the available snapshots in the store. - // It should return them in descending order, with the highest index first. - List() ([]*SnapshotMeta, error) - - // Open takes a snapshot ID and provides a ReadCloser. Once close is - // called it is assumed the snapshot is no longer needed. - Open(id string) (*SnapshotMeta, io.ReadCloser, error) -} - -// SnapshotSink is returned by SnapshotStore.Create. The FSM will Write state -// to the sink and call Close on completion. On error, Cancel will be invoked. -type SnapshotSink interface { - io.WriteCloser - ID() string - Cancel() error -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/stable.go b/Godeps/_workspace/src/github.com/hashicorp/raft/stable.go deleted file mode 100644 index 4588ea8a9..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/stable.go +++ /dev/null @@ -1,15 +0,0 @@ -package raft - -// StableStore is used to provide stable storage -// of key configurations to ensure safety. -type StableStore interface { - Set(key []byte, val []byte) error - - // Get returns the value for key, or an empty byte slice if key was not found. - Get(key []byte) ([]byte, error) - - SetUint64(key []byte, val uint64) error - - // GetUint64 returns the uint64 value for key, or 0 if key was not found. - GetUint64(key []byte) (uint64, error) -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/state.go b/Godeps/_workspace/src/github.com/hashicorp/raft/state.go deleted file mode 100644 index 41e80a1b5..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/state.go +++ /dev/null @@ -1,169 +0,0 @@ -package raft - -import ( - "sync/atomic" -) - -// RaftState captures the state of a Raft node: Follower, Candidate, Leader, -// or Shutdown. -type RaftState uint32 - -const ( - // Follower is the initial state of a Raft node. - Follower RaftState = iota - - // Candidate is one of the valid states of a Raft node. - Candidate - - // Leader is one of the valid states of a Raft node. - Leader - - // Shutdown is the terminal state of a Raft node. - Shutdown -) - -func (s RaftState) String() string { - switch s { - case Follower: - return "Follower" - case Candidate: - return "Candidate" - case Leader: - return "Leader" - case Shutdown: - return "Shutdown" - default: - return "Unknown" - } -} - -// raftState is used to maintain various state variables -// and provides an interface to set/get the variables in a -// thread-safe manner. 
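Stepping back to the StableStore contract above: it is small enough to satisfy from scratch. A minimal in-memory sketch, illustrative only (the library ships its own InmemStore for this purpose):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"sync"
)

// memStable is a minimal in-memory StableStore sketch: enough for tests,
// not durable storage.
type memStable struct {
	mu sync.Mutex
	kv map[string][]byte
}

func newMemStable() *memStable { return &memStable{kv: make(map[string][]byte)} }

func (m *memStable) Set(key, val []byte) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.kv[string(key)] = val
	return nil
}

func (m *memStable) Get(key []byte) ([]byte, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.kv[string(key)], nil
}

func (m *memStable) SetUint64(key []byte, val uint64) error {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, val)
	return m.Set(key, buf)
}

func (m *memStable) GetUint64(key []byte) (uint64, error) {
	b, err := m.Get(key)
	if err != nil || len(b) != 8 {
		return 0, err // missing key reads as 0, matching the contract
	}
	return binary.BigEndian.Uint64(b), nil
}

func main() {
	s := newMemStable()
	s.SetUint64([]byte("CurrentTerm"), 7)
	term, _ := s.GetUint64([]byte("CurrentTerm"))
	fmt.Println(term) // 7
}
```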
-type raftState struct { - // The current term, cache of StableStore - currentTerm uint64 - - // Cache the latest log from LogStore - LastLogIndex uint64 - LastLogTerm uint64 - - // Highest committed log entry - commitIndex uint64 - - // Last applied log to the FSM - lastApplied uint64 - - // Cache the latest snapshot index/term - lastSnapshotIndex uint64 - lastSnapshotTerm uint64 - - // Tracks the number of live routines - runningRoutines int32 - - // The current state - state RaftState -} - -func (r *raftState) getState() RaftState { - stateAddr := (*uint32)(&r.state) - return RaftState(atomic.LoadUint32(stateAddr)) -} - -func (r *raftState) setState(s RaftState) { - stateAddr := (*uint32)(&r.state) - atomic.StoreUint32(stateAddr, uint32(s)) -} - -func (r *raftState) getCurrentTerm() uint64 { - return atomic.LoadUint64(&r.currentTerm) -} - -func (r *raftState) setCurrentTerm(term uint64) { - atomic.StoreUint64(&r.currentTerm, term) -} - -func (r *raftState) getLastLogIndex() uint64 { - return atomic.LoadUint64(&r.LastLogIndex) -} - -func (r *raftState) setLastLogIndex(term uint64) { - atomic.StoreUint64(&r.LastLogIndex, term) -} - -func (r *raftState) getLastLogTerm() uint64 { - return atomic.LoadUint64(&r.LastLogTerm) -} - -func (r *raftState) setLastLogTerm(term uint64) { - atomic.StoreUint64(&r.LastLogTerm, term) -} - -func (r *raftState) getCommitIndex() uint64 { - return atomic.LoadUint64(&r.commitIndex) -} - -func (r *raftState) setCommitIndex(term uint64) { - atomic.StoreUint64(&r.commitIndex, term) -} - -func (r *raftState) getLastApplied() uint64 { - return atomic.LoadUint64(&r.lastApplied) -} - -func (r *raftState) setLastApplied(term uint64) { - atomic.StoreUint64(&r.lastApplied, term) -} - -func (r *raftState) getLastSnapshotIndex() uint64 { - return atomic.LoadUint64(&r.lastSnapshotIndex) -} - -func (r *raftState) setLastSnapshotIndex(term uint64) { - atomic.StoreUint64(&r.lastSnapshotIndex, term) -} - -func (r *raftState) getLastSnapshotTerm() uint64 { - return atomic.LoadUint64(&r.lastSnapshotTerm) -} - -func (r *raftState) setLastSnapshotTerm(term uint64) { - atomic.StoreUint64(&r.lastSnapshotTerm, term) -} - -func (r *raftState) incrRoutines() { - atomic.AddInt32(&r.runningRoutines, 1) -} - -func (r *raftState) decrRoutines() { - atomic.AddInt32(&r.runningRoutines, -1) -} - -func (r *raftState) getRoutines() int32 { - return atomic.LoadInt32(&r.runningRoutines) -} - -// Start a goroutine and properly handle the race between a routine -// starting and incrementing, and exiting and decrementing. -func (r *raftState) goFunc(f func()) { - r.incrRoutines() - go func() { - defer r.decrRoutines() - f() - }() -} - -// getLastIndex returns the last index in stable storage. -// Either from the last log or from the last snapshot. -func (r *raftState) getLastIndex() uint64 { - return max(r.getLastLogIndex(), r.getLastSnapshotIndex()) -} - -// getLastEntry returns the last index and term in stable storage. -// Either from the last log or from the last snapshot. 
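One detail of goFunc above is worth calling out before getLastEntry: the counter is incremented in the caller, before the goroutine is spawned, so the routine count can never be observed below the number of live goroutines. A standalone version of the pattern:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// tracker reproduces the goFunc pattern above: increment happens in the
// caller before the goroutine starts, decrement in a defer when it exits.
type tracker struct {
	running int32
}

func (t *tracker) goFunc(f func()) {
	atomic.AddInt32(&t.running, 1)
	go func() {
		defer atomic.AddInt32(&t.running, -1)
		f()
	}()
}

func main() {
	var t tracker
	done := make(chan struct{})
	t.goFunc(func() { <-done })

	// The count is already 1 here, regardless of goroutine scheduling.
	fmt.Println(atomic.LoadInt32(&t.running)) // 1
	close(done)
}
```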
-func (r *raftState) getLastEntry() (uint64, uint64) { - if r.getLastLogIndex() >= r.getLastSnapshotIndex() { - return r.getLastLogIndex(), r.getLastLogTerm() - } - return r.getLastSnapshotIndex(), r.getLastSnapshotTerm() -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/tcp_transport.go b/Godeps/_workspace/src/github.com/hashicorp/raft/tcp_transport.go deleted file mode 100644 index 1b1ea9c35..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/tcp_transport.go +++ /dev/null @@ -1,80 +0,0 @@ -package raft - -import ( - "errors" - "io" - "net" - "time" -) - -var ( - errNotAdvertisable = errors.New("local bind address is not advertisable") - errNotTCP = errors.New("local address is not a TCP address") -) - -// TCPStreamLayer implements StreamLayer interface for plain TCP. -type TCPStreamLayer struct { - advertise net.Addr - listener *net.TCPListener -} - -// NewTCPTransport returns a NetworkTransport that is built on top of -// a TCP streaming transport layer. -func NewTCPTransport( - bindAddr string, - advertise net.Addr, - maxPool int, - timeout time.Duration, - logOutput io.Writer, -) (*NetworkTransport, error) { - // Try to bind - list, err := net.Listen("tcp", bindAddr) - if err != nil { - return nil, err - } - - // Create stream - stream := &TCPStreamLayer{ - advertise: advertise, - listener: list.(*net.TCPListener), - } - - // Verify that we have a usable advertise address - addr, ok := stream.Addr().(*net.TCPAddr) - if !ok { - list.Close() - return nil, errNotTCP - } - if addr.IP.IsUnspecified() { - list.Close() - return nil, errNotAdvertisable - } - - // Create the network transport - trans := NewNetworkTransport(stream, maxPool, timeout, logOutput) - return trans, nil -} - -// Dial implements the StreamLayer interface. -func (t *TCPStreamLayer) Dial(address string, timeout time.Duration) (net.Conn, error) { - return net.DialTimeout("tcp", address, timeout) -} - -// Accept implements the net.Listener interface. -func (t *TCPStreamLayer) Accept() (c net.Conn, err error) { - return t.listener.Accept() -} - -// Close implements the net.Listener interface. -func (t *TCPStreamLayer) Close() (err error) { - return t.listener.Close() -} - -// Addr implements the net.Listener interface. 
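A hedged usage sketch for NewTCPTransport above. The import path is assumed from the vendor directory; any concrete loopback address works, while an unspecified bind such as 0.0.0.0 with no advertise address fails with errNotAdvertisable, as the tests just below exercise:

```go
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/hashicorp/raft" // import path assumed; vendored here under Godeps
)

func main() {
	// Bind a concrete loopback address; port 0 lets the OS pick a free port.
	trans, err := raft.NewTCPTransport("127.0.0.1:0", nil, 3, 10*time.Second, os.Stderr)
	if err != nil {
		panic(err)
	}
	fmt.Println("raft transport listening on", trans.LocalAddr())
}
```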
-func (t *TCPStreamLayer) Addr() net.Addr { - // Use an advertise addr if provided - if t.advertise != nil { - return t.advertise - } - return t.listener.Addr() -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/tcp_transport_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft/tcp_transport_test.go deleted file mode 100644 index 22d59da2a..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/tcp_transport_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package raft - -import ( - "net" - "testing" -) - -func TestTCPTransport_BadAddr(t *testing.T) { - _, err := NewTCPTransport("0.0.0.0:0", nil, 1, 0, nil) - if err != errNotAdvertisable { - t.Fatalf("err: %v", err) - } -} - -func TestTCPTransport_WithAdvertise(t *testing.T) { - addr := &net.TCPAddr{IP: []byte{127, 0, 0, 1}, Port: 12345} - trans, err := NewTCPTransport("0.0.0.0:0", addr, 1, 0, nil) - if err != nil { - t.Fatalf("err: %v", err) - } - if trans.LocalAddr() != "127.0.0.1:12345" { - t.Fatalf("bad: %v", trans.LocalAddr()) - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/transport.go b/Godeps/_workspace/src/github.com/hashicorp/raft/transport.go deleted file mode 100644 index 8928de0c2..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/transport.go +++ /dev/null @@ -1,85 +0,0 @@ -package raft - -import ( - "io" - "time" -) - -// RPCResponse captures both a response and a potential error. -type RPCResponse struct { - Response interface{} - Error error -} - -// RPC has a command, and provides a response mechanism. -type RPC struct { - Command interface{} - Reader io.Reader // Set only for InstallSnapshot - RespChan chan<- RPCResponse -} - -// Respond is used to respond with a response, error or both -func (r *RPC) Respond(resp interface{}, err error) { - r.RespChan <- RPCResponse{resp, err} -} - -// Transport provides an interface for network transports -// to allow Raft to communicate with other nodes. -type Transport interface { - // Consumer returns a channel that can be used to - // consume and respond to RPC requests. - Consumer() <-chan RPC - - // LocalAddr is used to return our local address to distinguish from our peers. - LocalAddr() string - - // AppendEntriesPipeline returns an interface that can be used to pipeline - // AppendEntries requests. - AppendEntriesPipeline(target string) (AppendPipeline, error) - - // AppendEntries sends the appropriate RPC to the target node. - AppendEntries(target string, args *AppendEntriesRequest, resp *AppendEntriesResponse) error - - // RequestVote sends the appropriate RPC to the target node. - RequestVote(target string, args *RequestVoteRequest, resp *RequestVoteResponse) error - - // InstallSnapshot is used to push a snapshot down to a follower. The data is read from - // the ReadCloser and streamed to the client. - InstallSnapshot(target string, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error - - // EncodePeer is used to serialize a peer name. - EncodePeer(string) []byte - - // DecodePeer is used to deserialize a peer name. - DecodePeer([]byte) string - - // SetHeartbeatHandler is used to setup a heartbeat handler - // as a fast-pass. This is to avoid head-of-line blocking from - // disk IO. If a Transport does not support this, it can simply - // ignore the call, and push the heartbeat onto the Consumer channel. - SetHeartbeatHandler(cb func(rpc RPC)) -} - -// AppendPipeline is used for pipelining AppendEntries requests. 
It is used -// to increase the replication throughput by masking latency and better -// utilizing bandwidth. -type AppendPipeline interface { - // AppendEntries is used to add another request to the pipeline. - // The send may block which is an effective form of back-pressure. - AppendEntries(args *AppendEntriesRequest, resp *AppendEntriesResponse) (AppendFuture, error) - - // Consumer returns a channel that can be used to consume - // response futures when they are ready. - Consumer() <-chan AppendFuture - - // Closes pipeline and cancels all inflight RPCs - Close() error -} - -// AppendFuture is used to return information about a pipelined AppendEntries request. -type AppendFuture interface { - Future - Start() time.Time - Request() *AppendEntriesRequest - Response() *AppendEntriesResponse -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/util.go b/Godeps/_workspace/src/github.com/hashicorp/raft/util.go deleted file mode 100644 index a6642c4c9..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/util.go +++ /dev/null @@ -1,200 +0,0 @@ -package raft - -import ( - "bytes" - crand "crypto/rand" - "encoding/binary" - "fmt" - "math" - "math/big" - "math/rand" - "time" - - "github.com/hashicorp/go-msgpack/codec" -) - -func init() { - // Ensure we use a high-entropy seed for the pseudo-random generator - rand.Seed(newSeed()) } - -// returns an int64 from a crypto random source -// can be used to seed a source for a math/rand. -func newSeed() int64 { - r, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64)) - if err != nil { - panic(fmt.Errorf("failed to read random bytes: %v", err)) - } - return r.Int64() -} - -// randomTimeout returns a value that is between the minVal and 2x minVal. -func randomTimeout(minVal time.Duration) <-chan time.Time { - if minVal == 0 { - return nil - } - extra := (time.Duration(rand.Int63()) % minVal) - return time.After(minVal + extra) -} - -// min returns the minimum. -func min(a, b uint64) uint64 { - if a <= b { - return a - } - return b -} - -// max returns the maximum. -func max(a, b uint64) uint64 { - if a >= b { - return a - } - return b -} - -// generateUUID is used to generate a random UUID. -func generateUUID() string { - buf := make([]byte, 16) - if _, err := crand.Read(buf); err != nil { - panic(fmt.Errorf("failed to read random bytes: %v", err)) - } - - return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", - buf[0:4], - buf[4:6], - buf[6:8], - buf[8:10], - buf[10:16]) -} - -// asyncNotify is used to do an async channel send to -// a list of channels. This will not block. -func asyncNotify(chans []chan struct{}) { - for _, ch := range chans { - asyncNotifyCh(ch) - } -} - -// asyncNotifyCh is used to do an async channel send -// to a single channel without blocking. -func asyncNotifyCh(ch chan struct{}) { - select { - case ch <- struct{}{}: - default: - } -} - -// asyncNotifyBool is used to do an async notification -// on a bool channel. -func asyncNotifyBool(ch chan bool, v bool) { - select { - case ch <- v: - default: - } -} - -// ExcludePeer is used to exclude a single peer from a list of peers. -func ExcludePeer(peers []string, peer string) []string { - otherPeers := make([]string, 0, len(peers)) - for _, p := range peers { - if p != peer { - otherPeers = append(otherPeers, p) - } - } - return otherPeers -} - -// PeerContained checks if a given peer is contained in a list.
-func PeerContained(peers []string, peer string) bool { - for _, p := range peers { - if p == peer { - return true - } - } - return false -} - -// AddUniquePeer is used to add a peer to a list of existing -// peers only if it is not already contained. -func AddUniquePeer(peers []string, peer string) []string { - if PeerContained(peers, peer) { - return peers - } - return append(peers, peer) -} - -// encodePeers is used to serialize a list of peers. -func encodePeers(peers []string, trans Transport) []byte { - // Encode each peer - var encPeers [][]byte - for _, p := range peers { - encPeers = append(encPeers, trans.EncodePeer(p)) - } - - // Encode the entire array - buf, err := encodeMsgPack(encPeers) - if err != nil { - panic(fmt.Errorf("failed to encode peers: %v", err)) - } - - return buf.Bytes() -} - -// decodePeers is used to deserialize a list of peers. -func decodePeers(buf []byte, trans Transport) []string { - // Decode the buffer first - var encPeers [][]byte - if err := decodeMsgPack(buf, &encPeers); err != nil { - panic(fmt.Errorf("failed to decode peers: %v", err)) - } - - // Deserialize each peer - var peers []string - for _, enc := range encPeers { - peers = append(peers, trans.DecodePeer(enc)) - } - - return peers -} - -// Decode reverses the encode operation on a byte slice input. -func decodeMsgPack(buf []byte, out interface{}) error { - r := bytes.NewBuffer(buf) - hd := codec.MsgpackHandle{} - dec := codec.NewDecoder(r, &hd) - return dec.Decode(out) -} - -// Encode writes an encoded object to a new bytes buffer. -func encodeMsgPack(in interface{}) (*bytes.Buffer, error) { - buf := bytes.NewBuffer(nil) - hd := codec.MsgpackHandle{} - enc := codec.NewEncoder(buf, &hd) - err := enc.Encode(in) - return buf, err -} - -// Converts bytes to an integer. -func bytesToUint64(b []byte) uint64 { - return binary.BigEndian.Uint64(b) -} - -// Converts a uint64 to a byte slice. -func uint64ToBytes(u uint64) []byte { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, u) - return buf -} - -// backoff is used to compute an exponential backoff -// duration. Base time is scaled by the current round, -// up to some maximum scale factor. 
-func backoff(base time.Duration, round, limit uint64) time.Duration { - power := min(round, limit) - for power > 2 { - base *= 2 - power-- - } - return base -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/raft/util_test.go b/Godeps/_workspace/src/github.com/hashicorp/raft/util_test.go deleted file mode 100644 index 191510972..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/raft/util_test.go +++ /dev/null @@ -1,187 +0,0 @@ -package raft - -import ( - "reflect" - "regexp" - "testing" - "time" -) - -func TestRandomTimeout(t *testing.T) { - start := time.Now() - timeout := randomTimeout(time.Millisecond) - - select { - case <-timeout: - diff := time.Now().Sub(start) - if diff < time.Millisecond { - t.Fatalf("fired early") - } - case <-time.After(3 * time.Millisecond): - t.Fatalf("timeout") - } -} - -func TestNewSeed(t *testing.T) { - vals := make(map[int64]bool) - for i := 0; i < 1000; i++ { - seed := newSeed() - if _, exists := vals[seed]; exists { - t.Fatal("newSeed() returned a value it'd previously returned") - } - vals[seed] = true - } -} - -func TestRandomTimeout_NoTime(t *testing.T) { - timeout := randomTimeout(0) - if timeout != nil { - t.Fatalf("expected nil channel") - } -} - -func TestMin(t *testing.T) { - if min(1, 1) != 1 { - t.Fatalf("bad min") - } - if min(2, 1) != 1 { - t.Fatalf("bad min") - } - if min(1, 2) != 1 { - t.Fatalf("bad min") - } -} - -func TestMax(t *testing.T) { - if max(1, 1) != 1 { - t.Fatalf("bad max") - } - if max(2, 1) != 2 { - t.Fatalf("bad max") - } - if max(1, 2) != 2 { - t.Fatalf("bad max") - } -} - -func TestGenerateUUID(t *testing.T) { - prev := generateUUID() - for i := 0; i < 100; i++ { - id := generateUUID() - if prev == id { - t.Fatalf("Should get a new ID!") - } - - matched, err := regexp.MatchString( - `[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}`, id) - if !matched || err != nil { - t.Fatalf("expected match %s %v %s", id, matched, err) - } - } -} - -func TestAsyncNotify(t *testing.T) { - chs := []chan struct{}{ - make(chan struct{}), - make(chan struct{}, 1), - make(chan struct{}, 2), - } - - // Should not block!
- asyncNotify(chs) - asyncNotify(chs) - asyncNotify(chs) - - // Try to read - select { - case <-chs[0]: - t.Fatalf("should not have message!") - default: - } - select { - case <-chs[1]: - default: - t.Fatalf("should have message!") - } - select { - case <-chs[2]: - default: - t.Fatalf("should have message!") - } - select { - case <-chs[2]: - default: - t.Fatalf("should have message!") - } -} - -func TestExcludePeer(t *testing.T) { - peers := []string{NewInmemAddr(), NewInmemAddr(), NewInmemAddr()} - peer := peers[2] - - after := ExcludePeer(peers, peer) - if len(after) != 2 { - t.Fatalf("Bad length") - } - if after[0] == peer || after[1] == peer { - t.Fatalf("should not contain peer") - } -} - -func TestPeerContained(t *testing.T) { - peers := []string{NewInmemAddr(), NewInmemAddr(), NewInmemAddr()} - - if !PeerContained(peers, peers[2]) { - t.Fatalf("Expect contained") - } - if PeerContained(peers, NewInmemAddr()) { - t.Fatalf("unexpected contained") - } -} - -func TestAddUniquePeer(t *testing.T) { - peers := []string{NewInmemAddr(), NewInmemAddr(), NewInmemAddr()} - after := AddUniquePeer(peers, peers[2]) - if !reflect.DeepEqual(after, peers) { - t.Fatalf("unexpected append") - } - after = AddUniquePeer(peers, NewInmemAddr()) - if len(after) != 4 { - t.Fatalf("expected append") - } -} - -func TestEncodeDecodePeers(t *testing.T) { - peers := []string{NewInmemAddr(), NewInmemAddr(), NewInmemAddr()} - _, trans := NewInmemTransport() - - // Try to encode/decode - buf := encodePeers(peers, trans) - decoded := decodePeers(buf, trans) - - if !reflect.DeepEqual(peers, decoded) { - t.Fatalf("mismatch %v %v", peers, decoded) - } -} - -func TestBackoff(t *testing.T) { - b := backoff(10*time.Millisecond, 1, 8) - if b != 10*time.Millisecond { - t.Fatalf("bad: %v", b) - } - - b = backoff(20*time.Millisecond, 2, 8) - if b != 20*time.Millisecond { - t.Fatalf("bad: %v", b) - } - - b = backoff(10*time.Millisecond, 8, 8) - if b != 640*time.Millisecond { - t.Fatalf("bad: %v", b) - } - - b = backoff(10*time.Millisecond, 9, 8) - if b != 640*time.Millisecond { - t.Fatalf("bad: %v", b) - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/.gitignore b/Godeps/_workspace/src/github.com/influxdb/influxdb/.gitignore deleted file mode 100644 index 50c31e055..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/.gitignore +++ /dev/null @@ -1,75 +0,0 @@ -*~ -src/ - -config.json -/bin/ - -TAGS - -# vim temp files -*.swp - -*.test -/query/a.out* -.DS_Store - -# ignore generated files. 
-cmd/influxd/version.go - -# executables - -influx_stress -**/influx_stress -!**/influx_stress/ - -influxd -**/influxd -!**/influxd/ - -influx -**/influx -!**/influx/ - -influxdb -**/influxdb -!**/influxdb/ - -influx_inspect -**/influx_inspect -!**/influx_inspect/ - -/benchmark-tool -/main -/benchmark-storage -godef -gosym -gocode -inspect-raft - -# dependencies -out_rpm/ -packages/ - -# autoconf -autom4te.cache/ -config.log -config.status - -# log file -influxdb.log -benchmark.log - -# config file -config.toml - -# test data files -integration/migration_data/ - -# goide project files -.idea - -# goconvey config files -*.goconvey - -# Ignore SourceGraph directory -.srclib-store/ diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/CHANGELOG.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/CHANGELOG.md deleted file mode 100644 index 3f84419b9..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/CHANGELOG.md +++ /dev/null @@ -1,1784 +0,0 @@ -## v0.9.5 [unreleased] - -### Release Notes -- Field names for the internal stats have been changed to be more in line with Go style. -- 0.9.5 is reverting to Go 1.4.2 due to unresolved issues with Go 1.5.1. - -### Features -- [#4098](https://github.com/influxdb/influxdb/pull/4702): Support 'history' command at CLI -- [#4098](https://github.com/influxdb/influxdb/issues/4098): Enable `golint` on the code base - uuid subpackage -- [#4141](https://github.com/influxdb/influxdb/pull/4141): Control whether each query should be logged -- [#4065](https://github.com/influxdb/influxdb/pull/4065): Added precision support in cmd client. Thanks @sbouchex -- [#4140](https://github.com/influxdb/influxdb/pull/4140): Make storage engine configurable -- [#4161](https://github.com/influxdb/influxdb/pull/4161): Implement bottom selector function -- [#4204](https://github.com/influxdb/influxdb/pull/4204): Allow module-level selection for SHOW STATS -- [#4208](https://github.com/influxdb/influxdb/pull/4208): Allow module-level selection for SHOW DIAGNOSTICS -- [#4196](https://github.com/influxdb/influxdb/pull/4196): Export tsdb.Iterator -- [#4198](https://github.com/influxdb/influxdb/pull/4198): Add basic cluster-service stats -- [#4262](https://github.com/influxdb/influxdb/pull/4262): Allow configuration of UDP retention policy -- [#4265](https://github.com/influxdb/influxdb/pull/4265): Add statistics for Hinted-Handoff -- [#4284](https://github.com/influxdb/influxdb/pull/4284): Add exponential backoff for hinted-handoff failures -- [#4310](https://github.com/influxdb/influxdb/pull/4310): Support dropping non-Raft nodes. Work mostly by @corylanou -- [#4348](https://github.com/influxdb/influxdb/pull/4348): Public ApplyTemplate function for graphite parser. -- [#4178](https://github.com/influxdb/influxdb/pull/4178): Support fields in graphite parser. Thanks @roobert! -- [#4409](https://github.com/influxdb/influxdb/pull/4409): wire up INTO queries. -- [#4379](https://github.com/influxdb/influxdb/pull/4379): Auto-create database for UDP input. -- [#4375](https://github.com/influxdb/influxdb/pull/4375): Add Subscriptions so data can be 'forked' out of InfluxDB to another third party. -- [#4506](https://github.com/influxdb/influxdb/pull/4506): Register with Enterprise service and upload stats, if token is available. -- [#4516](https://github.com/influxdb/influxdb/pull/4516): Hinted-handoff refactor, with new statistics and diagnostics -- [#4501](https://github.com/influxdb/influxdb/pull/4501): Allow filtering SHOW MEASUREMENTS by regex.
-- [#4547](https://github.com/influxdb/influxdb/pull/4547): Allow any node to be dropped, even a raft node (even the leader). -- [#4600](https://github.com/influxdb/influxdb/pull/4600): ping endpoint can wait for leader -- [#4648](https://github.com/influxdb/influxdb/pull/4648): UDP Client (v2 client) -- [#4690](https://github.com/influxdb/influxdb/pull/4690): SHOW SHARDS now includes database and policy. Thanks @pires -- [#4676](https://github.com/influxdb/influxdb/pull/4676): UDP service listener performance enhancements -- [#4659](https://github.com/influxdb/influxdb/pull/4659): Support IF EXISTS for DROP DATABASE. Thanks @ch33hau -- [#4721](https://github.com/influxdb/influxdb/pull/4721): Export tsdb.InterfaceValues -- [#4681](https://github.com/influxdb/influxdb/pull/4681): Increase default buffer size for collectd and graphite listeners -- [#4659](https://github.com/influxdb/influxdb/pull/4659): Support IF EXISTS for DROP DATABASE -- [#4685](https://github.com/influxdb/influxdb/pull/4685): Automatically promote node to raft peer if drop server results in removing a raft peer. - -### Bugfixes -- [#4715](https://github.com/influxdb/influxdb/pull/4715): Fix panic during Raft-close. Fix [issue #4707](https://github.com/influxdb/influxdb/issues/4707). Thanks @oiooj -- [#4643](https://github.com/influxdb/influxdb/pull/4643): Fix panic during backup restoration. Thanks @oiooj -- [#4632](https://github.com/influxdb/influxdb/pull/4632): Fix parsing of IPv6 hosts in client package. Thanks @miguelxpn -- [#4389](https://github.com/influxdb/influxdb/pull/4389): Don't add a new segment file on each hinted-handoff purge cycle. -- [#4166](https://github.com/influxdb/influxdb/pull/4166): Fix parser error on invalid SHOW -- [#3457](https://github.com/influxdb/influxdb/issues/3457): [0.9.3] cannot select field names with prefix + "." that match the measurement name -- [#4704](https://github.com/influxdb/influxdb/pull/4704). Tighten up command parsing within CLI. Thanks @pires -- [#4225](https://github.com/influxdb/influxdb/pull/4225): Always display diags in name-sorted order -- [#4111](https://github.com/influxdb/influxdb/pull/4111): Update pre-commit hook for go vet composites -- [#4136](https://github.com/influxdb/influxdb/pull/4136): Return an error-on-write if target retention policy does not exist. Thanks for the report @ymettier -- [#4228](https://github.com/influxdb/influxdb/pull/4228): Add build timestamp to version information. -- [#4124](https://github.com/influxdb/influxdb/issues/4124): Missing defer/recover/panic idiom in HTTPD service -- [#4238](https://github.com/influxdb/influxdb/pull/4238): Fully disable hinted-handoff service if so requested. -- [#4165](https://github.com/influxdb/influxdb/pull/4165): Tag all Go runtime stats when writing to internal database. -- [#4586](https://github.com/influxdb/influxdb/pull/4586): Exit when invalid engine is selected -- [#4118](https://github.com/influxdb/influxdb/issues/4118): Return consistent, correct result for SHOW MEASUREMENTS with multiple AND conditions -- [#4191](https://github.com/influxdb/influxdb/pull/4191): Correctly marshal remote mapper responses. Fixes [#4170](https://github.com/influxdb/influxdb/issues/4170) -- [#4222](https://github.com/influxdb/influxdb/pull/4222): Graphite TCP connections should not block shutdown -- [#4180](https://github.com/influxdb/influxdb/pull/4180): Cursor & SelectMapper Refactor -- [#1577](https://github.com/influxdb/influxdb/issues/1577): selectors (e.g. 
min, max, first, last) should have equivalents to return the actual point -- [#4264](https://github.com/influxdb/influxdb/issues/4264): Refactor map functions to use list of values -- [#4278](https://github.com/influxdb/influxdb/pull/4278): Fix error marshalling across the cluster -- [#4149](https://github.com/influxdb/influxdb/pull/4149): Fix derivative unnecessarily requires aggregate function. Thanks @peekeri! -- [#4674](https://github.com/influxdb/influxdb/pull/4674): Fix panic during restore. Thanks @simcap. -- [#4725](https://github.com/influxdb/influxdb/pull/4725): Don't list deleted shards during SHOW SHARDS. -- [#4237](https://github.com/influxdb/influxdb/issues/4237): DERIVATIVE() edge conditions -- [#4263](https://github.com/influxdb/influxdb/issues/4263): derivative does not work when data is missing -- [#4293](https://github.com/influxdb/influxdb/pull/4293): Ensure shell is invoked when touching PID file. Thanks @christopherjdickson -- [#4296](https://github.com/influxdb/influxdb/pull/4296): Reject line protocol ending with '-'. Fixes [#4272](https://github.com/influxdb/influxdb/issues/4272) -- [#4333](https://github.com/influxdb/influxdb/pull/4333): Retry monitor storage creation and storage only on Leader. -- [#4276](https://github.com/influxdb/influxdb/issues/4276): Walk DropSeriesStatement & check for empty sources -- [#4465](https://github.com/influxdb/influxdb/pull/4465): Actually display a message if the CLI can't connect to the database. -- [#4342](https://github.com/influxdb/influxdb/pull/4342): Fix mixing aggregates and math with non-aggregates. Thanks @kostya-sh. -- [#4349](https://github.com/influxdb/influxdb/issues/4349): If HH can't unmarshal a block, skip that block. -- [#4502](https://github.com/influxdb/influxdb/pull/4502): Don't crash on Graphite close, if Graphite not fully open. Thanks for the report @ranjib -- [#4354](https://github.com/influxdb/influxdb/pull/4353): Fully lock node queues during hinted handoff. Fixes one cause of missing data on clusters. -- [#4357](https://github.com/influxdb/influxdb/issues/4357): Fix similar float values encoding overflow Thanks @dgryski! -- [#4344](https://github.com/influxdb/influxdb/issues/4344): Make client.Write default to client.precision if none is given. -- [#3429](https://github.com/influxdb/influxdb/issues/3429): Incorrect parsing of regex containing '/' -- [#4374](https://github.com/influxdb/influxdb/issues/4374): Add tsm1 quickcheck tests -- [#4644](https://github.com/influxdb/influxdb/pull/4644): Check for response errors during token check, fixes issue [#4641](https://github.com/influxdb/influxdb/issues/4641) -- [#4377](https://github.com/influxdb/influxdb/pull/4377): Hinted handoff should not process dropped nodes -- [#4365](https://github.com/influxdb/influxdb/issues/4365): Prevent panic in DecodeSameTypeBlock -- [#4280](https://github.com/influxdb/influxdb/issues/4280): Only drop points matching WHERE clause -- [#4443](https://github.com/influxdb/influxdb/pull/4443): Fix race condition while listing store's shards. Fixes [#4442](https://github.com/influxdb/influxdb/issues/4442) -- [#4410](https://github.com/influxdb/influxdb/pull/4410): Fix infinite recursion in statement string(). 
Thanks @kostya-sh -- [#4360](https://github.com/influxdb/influxdb/issues/4360): Aggregate Selectors overwrite values during post-processing -- [#4421](https://github.com/influxdb/influxdb/issues/4421): Fix line protocol accepting tags with no values -- [#4434](https://github.com/influxdb/influxdb/pull/4434): Allow 'E' for scientific values. Fixes [#4433](https://github.com/influxdb/influxdb/issues/4433) -- [#4431](https://github.com/influxdb/influxdb/issues/4431): Add tsm1 WAL QuickCheck -- [#4438](https://github.com/influxdb/influxdb/pull/4438): openTSDB service shutdown fixes -- [#4447](https://github.com/influxdb/influxdb/pull/4447): Fixes to logrotate file. Thanks @linsomniac. -- [#3820](https://github.com/influxdb/influxdb/issues/3820): Fix js error in admin UI. -- [#4460](https://github.com/influxdb/influxdb/issues/4460): tsm1 meta lint -- [#4415](https://github.com/influxdb/influxdb/issues/4415): Selector (like max, min, first, etc) return a string instead of timestamp -- [#4472](https://github.com/influxdb/influxdb/issues/4472): Fix 'too many points in GROUP BY interval' error -- [#4475](https://github.com/influxdb/influxdb/issues/4475): Fix SHOW TAG VALUES error message. -- [#4486](https://github.com/influxdb/influxdb/pull/4486): Fix missing comments for runner package -- [#4497](https://github.com/influxdb/influxdb/pull/4497): Fix sequence in meta proto -- [#3367](https://github.com/influxdb/influxdb/issues/3367): Negative timestamps are parsed correctly by the line protocol. -- [#4563](https://github.com/influxdb/influxdb/pull/4536): Fix broken subscriptions updates. -- [#4538](https://github.com/influxdb/influxdb/issues/4538): Dropping database under a write load causes panics -- [#4582](https://github.com/influxdb/influxdb/pull/4582): Correct logging tags in cluster and TCP package. Thanks @oiooj -- [#4513](https://github.com/influxdb/influxdb/issues/4513): TSM1: panic: runtime error: index out of range -- [#4521](https://github.com/influxdb/influxdb/issues/4521): TSM1: panic: decode of short block: got 1, exp 9 -- [#4587](https://github.com/influxdb/influxdb/pull/4587): Prevent NaN float values from being stored -- [#4596](https://github.com/influxdb/influxdb/pull/4596): Skip empty string for start position when parsing line protocol. Thanks @ch33hau -- [#4610](https://github.com/influxdb/influxdb/pull/4610): Make internal stats names consistent with Go style. -- [#4625](https://github.com/influxdb/influxdb/pull/4625): Correctly handle bad write requests. Thanks @oiooj. -- [#4650](https://github.com/influxdb/influxdb/issues/4650): Importer should skip empty lines -- [#4651](https://github.com/influxdb/influxdb/issues/4651): Importer doesn't flush out last batch -- [#4602](https://github.com/influxdb/influxdb/issues/4602): Fixes data race between PointsWriter and Subscriber services. -- [#4691](https://github.com/influxdb/influxdb/issues/4691): Enable toml test `TestConfig_Encode`. -- [#4283](https://github.com/influxdb/influxdb/pull/4283): Disable HintedHandoff if configuration is not set. -- [#4703](https://github.com/influxdb/influxdb/pull/4703): Complete lint for cmd/influx. Thanks @pablolmiranda - -## v0.9.4 [2015-09-14] - -### Release Notes -With this release InfluxDB is moving to Go 1.5. - -### Features -- [#4050](https://github.com/influxdb/influxdb/pull/4050): Add stats to collectd -- [#3771](https://github.com/influxdb/influxdb/pull/3771): Close idle Graphite TCP connections -- [#3755](https://github.com/influxdb/influxdb/issues/3755): Add option to build script.
Thanks @fg2it -- [#3863](https://github.com/influxdb/influxdb/pull/3863): Move to Go 1.5 -- [#3892](https://github.com/influxdb/influxdb/pull/3892): Support IF NOT EXISTS for CREATE DATABASE -- [#3916](https://github.com/influxdb/influxdb/pull/3916): New statistics and diagnostics support. Graphite first to be instrumented. -- [#3901](https://github.com/influxdb/influxdb/pull/3901): Add consistency level option to influx cli Thanks @takayuki -- [#4048](https://github.com/influxdb/influxdb/pull/4048): Add statistics to Continuous Query service -- [#4049](https://github.com/influxdb/influxdb/pull/4049): Add stats to the UDP input -- [#3876](https://github.com/influxdb/influxdb/pull/3876): Allow the following syntax in CQs: INTO "1hPolicy".:MEASUREMENT -- [#3975](https://github.com/influxdb/influxdb/pull/3975): Add shard copy service -- [#3986](https://github.com/influxdb/influxdb/pull/3986): Support sorting by time desc -- [#3930](https://github.com/influxdb/influxdb/pull/3930): Wire up TOP aggregate function - fixes [#1821](https://github.com/influxdb/influxdb/issues/1821) -- [#4045](https://github.com/influxdb/influxdb/pull/4045): Instrument cluster-level points writer -- [#3996](https://github.com/influxdb/influxdb/pull/3996): Add statistics to httpd package -- [#4003](https://github.com/influxdb/influxdb/pull/4033): Add logrotate configuration. -- [#4043](https://github.com/influxdb/influxdb/pull/4043): Add stats and batching to openTSDB input -- [#4042](https://github.com/influxdb/influxdb/pull/4042): Add pending batches control to batcher -- [#4006](https://github.com/influxdb/influxdb/pull/4006): Add basic statistics for shards -- [#4072](https://github.com/influxdb/influxdb/pull/4072): Add statistics for the WAL. - -### Bugfixes -- [#4042](https://github.com/influxdb/influxdb/pull/4042): Set UDP input batching defaults as needed. -- [#3785](https://github.com/influxdb/influxdb/issues/3785): Invalid time stamp in graphite metric causes panic -- [#3804](https://github.com/influxdb/influxdb/pull/3804): init.d script fixes, fixes issue 3803. -- [#3823](https://github.com/influxdb/influxdb/pull/3823): Deterministic ordering for first() and last() -- [#3869](https://github.com/influxdb/influxdb/issues/3869): Seemingly deadlocked when ingesting metrics via graphite plugin -- [#3856](https://github.com/influxdb/influxdb/pull/3856): Minor changes to retention enforcement. -- [#3884](https://github.com/influxdb/influxdb/pull/3884): Fix two panics in WAL that can happen at server startup -- [#3868](https://github.com/influxdb/influxdb/pull/3868): Add shell option to start the daemon on CentOS. Thanks @SwannCroiset. -- [#3886](https://github.com/influxdb/influxdb/pull/3886): Prevent write timeouts due to lock contention in WAL -- [#3574](https://github.com/influxdb/influxdb/issues/3574): Querying data node causes panic -- [#3913](https://github.com/influxdb/influxdb/issues/3913): Convert meta shard owners to objects -- [#4026](https://github.com/influxdb/influxdb/pull/4026): Support multiple Graphite inputs. Fixes issue [#3636](https://github.com/influxdb/influxdb/issues/3636) -- [#3927](https://github.com/influxdb/influxdb/issues/3927): Add WAL lock to prevent timing lock contention -- [#3928](https://github.com/influxdb/influxdb/issues/3928): Write fails for multiple points when tag starts with quote -- [#3901](https://github.com/influxdb/influxdb/pull/3901): Unblock relaxed write consistency level Thanks @takayuki! 
-- [#3950](https://github.com/influxdb/influxdb/pull/3950): Limit bz1 quickcheck tests to 10 iterations on CI -- [#3977](https://github.com/influxdb/influxdb/pull/3977): Silence wal logging during testing -- [#3931](https://github.com/influxdb/influxdb/pull/3931): Don't precreate shard groups entirely in the past -- [#3960](https://github.com/influxdb/influxdb/issues/3960): possible "catch up" bug with nodes down in a cluster -- [#3980](https://github.com/influxdb/influxdb/pull/3980): 'service stop' waits until service actually stops. Fixes issue #3548. -- [#4016](https://github.com/influxdb/influxdb/pull/4016): Shutdown Graphite UDP on SIGTERM. -- [#4034](https://github.com/influxdb/influxdb/pull/4034): Rollback bolt tx on mapper open error -- [#3848](https://github.com/influxdb/influxdb/issues/3848): restart influxdb causing panic -- [#3881](https://github.com/influxdb/influxdb/issues/3881): panic: runtime error: invalid memory address or nil pointer dereference -- [#3926](https://github.com/influxdb/influxdb/issues/3926): First or last value of `GROUP BY time(x)` is often null. Fixed by [#4038](https://github.com/influxdb/influxdb/pull/4038) -- [#4053](https://github.com/influxdb/influxdb/pull/4053): Prohibit dropping default retention policy. -- [#4060](https://github.com/influxdb/influxdb/pull/4060): Don't log EOF error in openTSDB input. -- [#3978](https://github.com/influxdb/influxdb/issues/3978): [0.9.3] (regression) cannot use GROUP BY * with more than a single field in SELECT clause -- [#4058](https://github.com/influxdb/influxdb/pull/4058): Disable bz1 recompression -- [#3902](https://github.com/influxdb/influxdb/issues/3902): [0.9.3] DB should not crash when using invalid expression "GROUP BY time" -- [#3718](https://github.com/influxdb/influxdb/issues/3718): Derivative query with group by time but no aggregate function should fail parse - -## v0.9.3 [2015-08-26] - -### Release Notes - -There are breaking changes in this release. - - To store data points as integers you must now append `i` to the number if using the line protocol. - - If you have a UDP input configured, you should check the UDP section of [the new sample configuration file](https://github.com/influxdb/influxdb/blob/master/etc/config.sample.toml) to learn how to modify existing configuration files, as 0.9.3 now expects multiple UDP inputs. - - Configuration files must now have an entry for `wal-dir` in the `[data]` section. Check [new sample configuration file](https://github.com/influxdb/influxdb/blob/master/etc/config.sample.toml) for more details. - - The implicit `GROUP BY *` that was added to every `SELECT *` has been removed. Instead any tags in the data are now part of the columns in the returned query. - -Please see the *Features* section below for full details. - -### Features -- [#3376](https://github.com/influxdb/influxdb/pull/3376): Support for remote shard query mapping -- [#3372](https://github.com/influxdb/influxdb/pull/3372): Support joining nodes to existing cluster -- [#3426](https://github.com/influxdb/influxdb/pull/3426): Additional logging for continuous queries. Thanks @jhorwit2 -- [#3478](https://github.com/influxdb/influxdb/pull/3478): Support incremental cluster joins -- [#3519](https://github.com/influxdb/influxdb/pull/3519): **--BREAKING CHANGE--** Update line protocol to require trailing i for field values that are integers -- [#3529](https://github.com/influxdb/influxdb/pull/3529): Add TLS support for OpenTSDB plugin. 
Thanks @nathanielc -- [#3421](https://github.com/influxdb/influxdb/issues/3421): Should update metastore and cluster if IP or hostname changes -- [#3502](https://github.com/influxdb/influxdb/pull/3502): Importer for 0.8.9 data via the CLI -- [#3564](https://github.com/influxdb/influxdb/pull/3564): Fix alias, maintain column sort order -- [#3585](https://github.com/influxdb/influxdb/pull/3585): Additional test coverage for non-existent fields -- [#3246](https://github.com/influxdb/influxdb/issues/3246): Allow overriding of configuration parameters using environment variables -- [#3599](https://github.com/influxdb/influxdb/pull/3599): **--BREAKING CHANGE--** Support multiple UDP inputs. Thanks @tpitale -- [#3636](https://github.com/influxdb/influxdb/pull/3639): Cap auto-created retention policy replica count at 3 -- [#3641](https://github.com/influxdb/influxdb/pull/3641): Logging enhancements and single-node rename -- [#3635](https://github.com/influxdb/influxdb/pull/3635): Add build branch to version output. -- [#3115](https://github.com/influxdb/influxdb/pull/3115): Various init.d script improvements. Thanks @KoeSystems. -- [#3628](https://github.com/influxdb/influxdb/pull/3628): Wildcard expansion of tags and fields for raw queries -- [#3721](https://github.com/influxdb/influxdb/pull/3721): interpret number literals compared against time as nanoseconds from epoch -- [#3514](https://github.com/influxdb/influxdb/issues/3514): Implement WAL outside BoltDB with compaction -- [#3544](https://github.com/influxdb/influxdb/pull/3544): Implement compression on top of BoltDB -- [#3795](https://github.com/influxdb/influxdb/pull/3795): Throttle import -- [#3584](https://github.com/influxdb/influxdb/pull/3584): Import/export documentation - -### Bugfixes -- [#3405](https://github.com/influxdb/influxdb/pull/3405): Prevent database panic when fields are missing. Thanks @jhorwit2 -- [#3411](https://github.com/influxdb/influxdb/issues/3411): 500 timeout on write -- [#3420](https://github.com/influxdb/influxdb/pull/3420): Catch opentsdb malformed tags. Thanks @nathanielc. -- [#3404](https://github.com/influxdb/influxdb/pull/3404): Added support for escaped single quotes in query string. Thanks @jhorwit2 -- [#3414](https://github.com/influxdb/influxdb/issues/3414): Shard mappers perform query re-writing -- [#3525](https://github.com/influxdb/influxdb/pull/3525): check if fields are valid during parse time.
-- [#3511](https://github.com/influxdb/influxdb/issues/3511): Sending a large number of tags causes panic -- [#3288](https://github.com/influxdb/influxdb/issues/3288): Run go fuzz on the line-protocol input -- [#3545](https://github.com/influxdb/influxdb/issues/3545): Fix parsing string fields with newlines -- [#3579](https://github.com/influxdb/influxdb/issues/3579): Revert breaking change to `client.NewClient` function -- [#3580](https://github.com/influxdb/influxdb/issues/3580): Do not allow wildcards with fields in select statements -- [#3530](https://github.com/influxdb/influxdb/pull/3530): Aliasing a column no longer works -- [#3436](https://github.com/influxdb/influxdb/issues/3436): Fix panic in hinted handoff queue processor -- [#3401](https://github.com/influxdb/influxdb/issues/3401): Derivative on non-numeric fields panics db -- [#3583](https://github.com/influxdb/influxdb/issues/3583): Inserting value in scientific notation with a trailing i causes panic -- [#3611](https://github.com/influxdb/influxdb/pull/3611): Fix query arithmetic with integers -- [#3326](https://github.com/influxdb/influxdb/issues/3326): simple regex query fails with cryptic error -- [#3618](https://github.com/influxdb/influxdb/pull/3618): Fix collectd stats panic on i386. Thanks @richterger -- [#3625](https://github.com/influxdb/influxdb/pull/3625): Don't panic when aggregate and raw queries are in a single statement -- [#3629](https://github.com/influxdb/influxdb/pull/3629): Use sensible batching defaults for Graphite. -- [#3638](https://github.com/influxdb/influxdb/pull/3638): Cluster config fixes and removal of meta.peers config field -- [#3640](https://github.com/influxdb/influxdb/pull/3640): Shutdown Graphite service when signal received. -- [#3632](https://github.com/influxdb/influxdb/issues/3632): Make single-node host renames more seamless -- [#3656](https://github.com/influxdb/influxdb/issues/3656): Silence snapshotter logger for testing -- [#3651](https://github.com/influxdb/influxdb/pull/3651): Fully remove series when dropped. -- [#3517](https://github.com/influxdb/influxdb/pull/3517): Batch CQ writes to avoid timeouts. Thanks @dim. -- [#3522](https://github.com/influxdb/influxdb/pull/3522): Consume CQ results on request timeouts. Thanks @dim. -- [#3646](https://github.com/influxdb/influxdb/pull/3646): Fix nil FieldCodec panic. -- [#3672](https://github.com/influxdb/influxdb/pull/3672): Reduce in-memory index by 20%-30% -- [#3673](https://github.com/influxdb/influxdb/pull/3673): Improve query performance by removing unnecessary tagset sorting. -- [#3676](https://github.com/influxdb/influxdb/pull/3676): Improve query performance by memoizing mapper output keys. -- [#3686](https://github.com/influxdb/influxdb/pull/3686): Ensure 'p' parameter is not logged, even on OPTIONS requests. -- [#3687](https://github.com/influxdb/influxdb/issues/3687): Fix panic: runtime error: makeslice: len out of range in hinted handoff -- [#3697](https://github.com/influxdb/influxdb/issues/3697): Correctly merge non-chunked results for same series. Fix issue #3242.
-- [#3708](https://github.com/influxdb/influxdb/issues/3708): Fix double escaping measurement name during cluster replication -- [#3704](https://github.com/influxdb/influxdb/issues/3704): cluster replication issue for measurement name containing backslash -- [#3681](https://github.com/influxdb/influxdb/issues/3681): Quoted measurement names fail -- [#3681](https://github.com/influxdb/influxdb/issues/3682): Fix inserting string value with backslashes -- [#3735](https://github.com/influxdb/influxdb/issues/3735): Append to small bz1 blocks -- [#3736](https://github.com/influxdb/influxdb/pull/3736): Update shard group duration with retention policy changes. Thanks for the report @papylhomme -- [#3539](https://github.com/influxdb/influxdb/issues/3539): parser incorrectly accepts NaN as numerical value, but not always -- [#3790](https://github.com/influxdb/influxdb/pull/3790): Fix line protocol parsing equals in measurements and NaN values -- [#3778](https://github.com/influxdb/influxdb/pull/3778): Don't panic if SELECT on time. -- [#3824](https://github.com/influxdb/influxdb/issues/3824): tsdb.Point.MarshalBinary needs to support all number types -- [#3828](https://github.com/influxdb/influxdb/pull/3828): Support all number types when decoding a point -- [#3853](https://github.com/influxdb/influxdb/pull/3853): Use 4KB default block size for bz1 -- [#3607](https://github.com/influxdb/influxdb/issues/3607): Fix unable to query influxdb due to deadlock in metastore. Thanks @ccutrer! - -## v0.9.2 [2015-07-24] - -### Features -- [#3177](https://github.com/influxdb/influxdb/pull/3177): Client supports making HTTPS requests. Thanks @jipperinbham -- [#3299](https://github.com/influxdb/influxdb/pull/3299): Refactor query engine for distributed query support. -- [#3334](https://github.com/influxdb/influxdb/pull/3334): Clean shutdown of influxd. Thanks @mcastilho - -### Bugfixes - -- [#3180](https://github.com/influxdb/influxdb/pull/3180): Log GOMAXPROCS, version, and commit on startup. -- [#3218](https://github.com/influxdb/influxdb/pull/3218): Allow write timeouts to be configurable. -- [#3184](https://github.com/influxdb/influxdb/pull/3184): Support basic auth in admin interface. Thanks @jipperinbham! -- [#3236](https://github.com/influxdb/influxdb/pull/3236): Fix display issues in admin interface. -- [#3232](https://github.com/influxdb/influxdb/pull/3232): Set logging prefix for metastore. -- [#3230](https://github.com/influxdb/influxdb/issues/3230): panic: unable to parse bool value -- [#3245](https://github.com/influxdb/influxdb/issues/3245): Error using graphite plugin with multiple filters -- [#3223](https://github.com/influxdb/influxdb/issues/323): default graphite template cannot have extra tags -- [#3255](https://github.com/influxdb/influxdb/pull/3255): Flush WAL on start-up as soon as possible. -- [#3289](https://github.com/influxdb/influxdb/issues/3289): InfluxDB crashes on floats without decimal -- [#3298](https://github.com/influxdb/influxdb/pull/3298): Corrected WAL & flush parameters in default config. Thanks @jhorwit2 -- [#3152](https://github.com/influxdb/influxdb/issues/3159): High CPU Usage with unsorted writes -- [#3307](https://github.com/influxdb/influxdb/pull/3307): Fix regression parsing boolean values True/False -- [#3304](https://github.com/influxdb/influxdb/pull/3304): Fixed httpd logger to log user from query params. Thanks @jhorwit2 -- [#3332](https://github.com/influxdb/influxdb/pull/3332): Add SLIMIT and SOFFSET to string version of AST. 
-- [#3335](https://github.com/influxdb/influxdb/pull/3335): Don't drop all data on DROP DATABASE. Thanks to @PierreF for the report -- [#2761](https://github.com/influxdb/influxdb/issues/2761): Make SHOW RETENTION POLICIES consistent with other queries. -- [#3356](https://github.com/influxdb/influxdb/pull/3356): Disregard semicolons after database name in use command. Thanks @timraymond. -- [#3351](https://github.com/influxdb/influxdb/pull/3351): Handle malformed regex comparisons during parsing. Thanks @rnubel -- [#3244](https://github.com/influxdb/influxdb/pull/3244): Wire up admin privilege grant and revoke. -- [#3259](https://github.com/influxdb/influxdb/issues/3259): Respect privileges for queries. -- [#3256](https://github.com/influxdb/influxdb/pull/3256): Remove unnecessary timeout in WaitForLeader(). Thanks @cannium. -- [#3380](https://github.com/influxdb/influxdb/issue/3380): Parser fix, only allow ORDER BY ASC and ORDER BY time ASC. -- [#3319](https://github.com/influxdb/influxdb/issues/3319): restarting process irrevocably BREAKS measurements with spaces -- [#3453](https://github.com/influxdb/influxdb/issues/3453): Remove outdated `dump` command from CLI. -- [#3463](https://github.com/influxdb/influxdb/issues/3463): Fix aggregate queries and time precision on where clauses. - -## v0.9.1 [2015-07-02] - -### Features - -- [2650](https://github.com/influxdb/influxdb/pull/2650): Add SHOW GRANTS FOR USER statement. Thanks @n1tr0g. -- [3125](https://github.com/influxdb/influxdb/pull/3125): Graphite Input Protocol Parsing -- [2746](https://github.com/influxdb/influxdb/pull/2746): New Admin UI/interface -- [3036](https://github.com/influxdb/influxdb/pull/3036): Write Ahead Log (WAL) -- [3014](https://github.com/influxdb/influxdb/issues/3014): Implement Raft snapshots - -### Bugfixes - -- [3013](https://github.com/influxdb/influxdb/issues/3013): Panic error with inserting values with commas -- [#2956](https://github.com/influxdb/influxdb/issues/2956): Type mismatch in derivative -- [#2908](https://github.com/influxdb/influxdb/issues/2908): Field mismatch error messages need to be updated -- [#2931](https://github.com/influxdb/influxdb/pull/2931): Services and reporting should wait until cluster has leader. -- [#2943](https://github.com/influxdb/influxdb/issues/2943): Ensure default retention policies are fully replicated -- [#2948](https://github.com/influxdb/influxdb/issues/2948): Field mismatch error message to include measurement name -- [#2919](https://github.com/influxdb/influxdb/issues/2919): Unable to insert negative floats -- [#2935](https://github.com/influxdb/influxdb/issues/2935): Hook CPU and memory profiling back up. -- [#2960](https://github.com/influxdb/influxdb/issues/2960): Cluster Write Errors. -- [#2928](https://github.com/influxdb/influxdb/pull/2928): Start work to set InfluxDB version in HTTP response headers. Thanks @neonstalwart. -- [#2969](https://github.com/influxdb/influxdb/pull/2969): Actually set HTTP version in responses. -- [#2993](https://github.com/influxdb/influxdb/pull/2993): Don't log each UDP batch. -- [#2994](https://github.com/influxdb/influxdb/pull/2994): Don't panic during wildcard expansion if no default database specified. -- [#3002](https://github.com/influxdb/influxdb/pull/3002): Remove measurement from shard's index on DROP MEASUREMENT. -- [#3021](https://github.com/influxdb/influxdb/pull/3021): Correctly set HTTP write trace logging. Thanks @vladlopes.
-- [#3027](https://github.com/influxdb/influxdb/pull/3027): Enforce minimum retention policy duration of 1 hour. -- [#3030](https://github.com/influxdb/influxdb/pull/3030): Fix excessive logging of shard creation. -- [#3038](https://github.com/influxdb/influxdb/pull/3038): Don't check deleted shards for precreation. Thanks @vladlopes. -- [#3033](https://github.com/influxdb/influxdb/pull/3033): Add support for marshaling `uint64` in client. -- [#3090](https://github.com/influxdb/influxdb/pull/3090): Remove database from TSDB index on DROP DATABASE. -- [#2944](https://github.com/influxdb/influxdb/issues/2944): Don't require "WHERE time" when creating continuous queries. -- [#3075](https://github.com/influxdb/influxdb/pull/3075): GROUP BY correctly when different tags have same value. -- [#3078](https://github.com/influxdb/influxdb/pull/3078): Fix CLI panic on malformed INSERT. -- [#2102](https://github.com/influxdb/influxdb/issues/2102): Re-work Graphite input and metric processing -- [#2996](https://github.com/influxdb/influxdb/issues/2996): Graphite Input Parsing -- [#3136](https://github.com/influxdb/influxdb/pull/3136): Fix various issues with init.d script. Thanks @miguelcnf. -- [#2996](https://github.com/influxdb/influxdb/issues/2996): Graphite Input Parsing -- [#3127](https://github.com/influxdb/influxdb/issues/3127): Trying to insert a number larger than the largest signed 64-bit number kills influxd -- [#3131](https://github.com/influxdb/influxdb/pull/3131): Copy batch tags to each point before marshalling -- [#3155](https://github.com/influxdb/influxdb/pull/3155): Instantiate UDP batcher before listening for UDP traffic, otherwise a panic may result. -- [#2678](https://github.com/influxdb/influxdb/issues/2678): Server allows tags with an empty string for the key and/or value -- [#3061](https://github.com/influxdb/influxdb/issues/3061): syntactically incorrect line protocol insert panics the database -- [#2608](https://github.com/influxdb/influxdb/issues/2608): drop measurement while writing points to that measurement has race condition that can panic -- [#3183](https://github.com/influxdb/influxdb/issues/3183): using line protocol measurement names cannot contain commas (see the escaping sketch below) -- [#3193](https://github.com/influxdb/influxdb/pull/3193): Fix panic for SHOW STATS and in collectd -- [#3102](https://github.com/influxdb/influxdb/issues/3102): Add authentication cache -- [#3209](https://github.com/influxdb/influxdb/pull/3209): Dump Run() errors to stderr -- [#3217](https://github.com/influxdb/influxdb/pull/3217): Allow WAL partition flush delay to be configurable.
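
The v0.9.1 fixes above repeatedly touch unescaped line-protocol text (#3013: commas in field values, #3061: malformed inserts panicking the server, #3183: commas in measurement names). A minimal Go sketch of the client-side escaping that avoids those parser edge cases; the server address, database name, and point values are illustrative assumptions, not details taken from this changelog:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// escapeLP backslash-escapes the characters that delimit measurement
// names and tag keys/values in the 0.9 line protocol: commas, spaces,
// and equals signs.
func escapeLP(s string) string {
	return strings.NewReplacer(",", `\,`, " ", `\ `, "=", `\=`).Replace(s)
}

func main() {
	// "disk usage" and "us,east" are exactly the kind of inputs the
	// entries above describe as breaking older parsers when sent raw.
	line := fmt.Sprintf("%s,region=%s value=0.64",
		escapeLP("disk usage"), escapeLP("us,east"))

	// Hypothetical local server and database, for illustration only.
	resp, err := http.Post("http://localhost:8086/write?db=mydb",
		"text/plain", strings.NewReader(line))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```
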
- -## v0.9.0 [2015-06-11] - -### Bugfixes - -- [#2869](https://github.com/influxdb/influxdb/issues/2869): Adding field to existing measurement causes panic -- [#2849](https://github.com/influxdb/influxdb/issues/2849): RC32: Frequent write errors -- [#2700](https://github.com/influxdb/influxdb/issues/2700): Incorrect error message in database EncodeFields -- [#2897](https://github.com/influxdb/influxdb/pull/2897): Ensure target Graphite database exists -- [#2898](https://github.com/influxdb/influxdb/pull/2898): Ensure target openTSDB database exists -- [#2895](https://github.com/influxdb/influxdb/pull/2895): Use Graphite input defaults where necessary -- [#2900](https://github.com/influxdb/influxdb/pull/2900): Use openTSDB input defaults where necessary -- [#2886](https://github.com/influxdb/influxdb/issues/2886): Refactor backup & restore -- [#2804](https://github.com/influxdb/influxdb/pull/2804): BREAKING: change time literals to be single quoted in InfluxQL. Thanks @nvcook42! -- [#2906](https://github.com/influxdb/influxdb/pull/2906): Restrict replication factor to the cluster size -- [#2905](https://github.com/influxdb/influxdb/pull/2905): Restrict clusters to 3 peers -- [#2904](https://github.com/influxdb/influxdb/pull/2904): Re-enable server reporting. -- [#2917](https://github.com/influxdb/influxdb/pull/2917): Fix int64 field values. -- [#2920](https://github.com/influxdb/influxdb/issues/2920): Ensure collectd database exists - -## v0.9.0-rc33 [2015-06-09] - -### Bugfixes - -- [#2816](https://github.com/influxdb/influxdb/pull/2816): Enable UDP service. Thanks @renan- -- [#2824](https://github.com/influxdb/influxdb/pull/2824): Add missing call to WaitGroup.Done in execConn. Thanks @liyichao -- [#2823](https://github.com/influxdb/influxdb/pull/2823): Convert OpenTSDB to a service. -- [#2838](https://github.com/influxdb/influxdb/pull/2838): Set auto-created retention policy period to infinite. -- [#2829](https://github.com/influxdb/influxdb/pull/2829): Re-enable Graphite support as a new Service-style component. -- [#2814](https://github.com/influxdb/influxdb/issues/2814): Convert collectd to a service. -- [#2852](https://github.com/influxdb/influxdb/pull/2852): Don't panic when altering retention policies. Thanks for the report @huhongbo -- [#2857](https://github.com/influxdb/influxdb/issues/2857): Fix parsing commas in string field values. -- [#2833](https://github.com/influxdb/influxdb/pull/2833): Make the default config valid. -- [#2859](https://github.com/influxdb/influxdb/pull/2859): Fix panic on aggregate functions. -- [#2878](https://github.com/influxdb/influxdb/pull/2878): Re-enable shard precreation. -- [2865](https://github.com/influxdb/influxdb/pull/2865) -- Return an empty set of results if database does not exist in shard metadata. - -### Features -- [2858](https://github.com/influxdb/influxdb/pull/2858): Support setting openTSDB write consistency. - -## v0.9.0-rc32 [2015-06-07] - -### Release Notes - -This release introduced an updated write path and clustering design. The data format has also changed, so you'll need to wipe out your data to upgrade from RC31. There should be no other data changes before v0.9.0 is released. - -### Features -- [#1997](https://github.com/influxdb/influxdb/pull/1997): Update SELECT * to return tag values. -- [#2599](https://github.com/influxdb/influxdb/issues/2599): Add "epoch" URL param and return JSON time values as epoch instead of date strings.
-- [#2682](https://github.com/influxdb/influxdb/issues/2682): Adding pr checklist to CONTRIBUTING.md -- [#2683](https://github.com/influxdb/influxdb/issues/2683): Add batching support to Graphite inputs. -- [#2687](https://github.com/influxdb/influxdb/issues/2687): Add batching support to Collectd inputs. -- [#2696](https://github.com/influxdb/influxdb/pull/2696): Add line protocol. This is now the preferred way to write data. -- [#2751](https://github.com/influxdb/influxdb/pull/2751): Add UDP input. UDP only supports the line protocol now (see the UDP write sketch below). -- [#2684](https://github.com/influxdb/influxdb/pull/2684): Include client timeout configuration. Thanks @vladlopes! - -### Bugfixes -- [#2776](https://github.com/influxdb/influxdb/issues/2776): Re-implement retention policy enforcement. -- [#2635](https://github.com/influxdb/influxdb/issues/2635): Fix querying against boolean field in WHERE clause. -- [#2644](https://github.com/influxdb/influxdb/issues/2644): Make SHOW queries work with FROM //. -- [#2501](https://github.com/influxdb/influxdb/issues/2501): Name the FlagSet for the shell and add a version flag. Thanks @neonstalwart -- [#2647](https://github.com/influxdb/influxdb/issues/2647): Fixes typos in sample config file - thanks @claws! - -## v0.9.0-rc31 [2015-05-21] - -### Features -- [#1822](https://github.com/influxdb/influxdb/issues/1822): Wire up DERIVATIVE aggregate -- [#1477](https://github.com/influxdb/influxdb/issues/1477): Wire up non_negative_derivative function -- [#2557](https://github.com/influxdb/influxdb/issues/2557): Fix false positive error with `GROUP BY time` -- [#1891](https://github.com/influxdb/influxdb/issues/1891): Wire up COUNT DISTINCT aggregate -- [#1989](https://github.com/influxdb/influxdb/issues/1989): Implement `SELECT tagName FROM m` - -### Bugfixes -- [#2545](https://github.com/influxdb/influxdb/pull/2545): Use "value" as the field name for graphite input. Thanks @cannium. -- [#2558](https://github.com/influxdb/influxdb/pull/2558): Fix client response check - thanks @vladlopes! -- [#2566](https://github.com/influxdb/influxdb/pull/2566): Wait until each data write has been committed by the Raft cluster. -- [#2602](https://github.com/influxdb/influxdb/pull/2602): CLI execute command exits without cleaning up liner package. -- [#2610](https://github.com/influxdb/influxdb/pull/2610): Fix shard group creation -- [#2596](https://github.com/influxdb/influxdb/pull/2596): RC30: `panic: runtime error: index out of range` when insert data points. -- [#2592](https://github.com/influxdb/influxdb/pull/2592): Should return an error if user attempts to group by a field. -- [#2499](https://github.com/influxdb/influxdb/pull/2499): Issuing a select query with tag as a value causes panic. -- [#2612](https://github.com/influxdb/influxdb/pull/2612): Query planner should validate distinct is passed a field. -- [#2531](https://github.com/influxdb/influxdb/issues/2531): Fix select with 3 or more terms in where clause. -- [#2564](https://github.com/influxdb/influxdb/issues/2564): Change "name" to "measurement" in JSON for writes. - -## PRs
- [#2569](https://github.com/influxdb/influxdb/pull/2569): Add derivative functions -- [#2598](https://github.com/influxdb/influxdb/pull/2598): Implement tag support in SELECT statements -- [#2624](https://github.com/influxdb/influxdb/pull/2624): Remove references to SeriesID in `DROP SERIES` handlers.
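
The rc32 notes above make the line protocol the preferred write path and add a UDP input that accepts nothing else (#2696, #2751). A minimal sketch of such a fire-and-forget UDP write; it assumes the UDP service is enabled in the config, and the localhost:8089 address is an assumption for illustration, not a documented default:

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Assumes the UDP input is enabled and bound to this address in the
	// server config; the port here is illustrative.
	conn, err := net.Dial("udp", "localhost:8089")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// One line-protocol point per line; the timestamp is nanoseconds
	// since the epoch.
	point := fmt.Sprintf("cpu,host=server01 value=0.42 %d\n", time.Now().UnixNano())
	if _, err := conn.Write([]byte(point)); err != nil {
		panic(err)
	}
}
```

UDP returns no status, so malformed or dropped points fail silently; that is the trade-off for its low overhead.
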
- -## v0.9.0-rc30 [2015-05-12] - -### Release Notes - -This release has a breaking API change for writes -- the field previously called `timestamp` has been renamed to `time`. - -### Features -- [#2254](https://github.com/influxdb/influxdb/pull/2254): Add Support for OpenTSDB HTTP interface. Thanks @tcolgate -- [#2525](https://github.com/influxdb/influxdb/pull/2525): Serve broker diagnostics over HTTP -- [#2186](https://github.com/influxdb/influxdb/pull/2186): The default status code for queries is now `200 OK` -- [#2298](https://github.com/influxdb/influxdb/pull/2298): Successful writes now return a status code of `204 No Content` - thanks @neonstalwart! -- [#2549](https://github.com/influxdb/influxdb/pull/2549): Raft election timeout to 5 seconds, so the system is more forgiving of CPU loads. -- [#2568](https://github.com/influxdb/influxdb/pull/2568): Wire up SELECT DISTINCT. - -### Bugfixes -- [#2535](https://github.com/influxdb/influxdb/pull/2535): Return exit status 0 if influxd already running. Thanks @haim0n. -- [#2521](https://github.com/influxdb/influxdb/pull/2521): Don't truncate topic data until fully replicated. -- [#2509](https://github.com/influxdb/influxdb/pull/2509): Parse config file correctly during restore. Thanks @neonstalwart -- [#2536](https://github.com/influxdb/influxdb/issues/2532): Set leader ID on restart of single-node cluster. -- [#2448](https://github.com/influxdb/influxdb/pull/2448): Fix inconsistent data type - thanks @cannium! -- [#2108](https://github.com/influxdb/influxdb/issues/2108): Change `timestamp` to `time` - thanks @neonstalwart! -- [#2539](https://github.com/influxdb/influxdb/issues/2539): Add additional vote request logging. -- [#2541](https://github.com/influxdb/influxdb/issues/2541): Update messaging client connection index with every message. -- [#2542](https://github.com/influxdb/influxdb/issues/2542): Throw parser error for invalid aggregate without where time. -- [#2548](https://github.com/influxdb/influxdb/issues/2548): Return an error when numeric aggregate applied to non-numeric data. -- [#2487](https://github.com/influxdb/influxdb/issues/2487): Aggregate query with exact timestamp causes panic. Thanks @neonstalwart! -- [#2552](https://github.com/influxdb/influxdb/issues/2552): Run CQ that is actually passed into go-routine. -- [#2553](https://github.com/influxdb/influxdb/issues/2553): Fix race condition during CQ execution. -- [#2557](https://github.com/influxdb/influxdb/issues/2557): RC30 WHERE time filter Regression. - -## v0.9.0-rc29 [2015-05-05] - -### Features -- [#2410](https://github.com/influxdb/influxdb/pull/2410): If needed, brokers respond with data nodes for peer shard replication. -- [#2469](https://github.com/influxdb/influxdb/pull/2469): Reduce default max topic size from 1GB to 50MB. -- [#1824](https://github.com/influxdb/influxdb/pull/1824): Wire up MEDIAN aggregate. Thanks @neonstalwart! - -### Bugfixes -- [#2446](https://github.com/influxdb/influxdb/pull/2446): Correctly count number of queries executed. Thanks @neonstalwart -- [#2452](https://github.com/influxdb/influxdb/issues/2452): Fix panic with shard stats on multiple clusters -- [#2453](https://github.com/influxdb/influxdb/pull/2453): Do not require snapshot on Log.WriteEntriesTo(). -- [#2460](https://github.com/influxdb/influxdb/issues/2460): Collectd input should use "value" for fields values. Fixes 2412. Thanks @josh-padnick -- [#2465](https://github.com/influxdb/influxdb/pull/2465): HTTP response logging panicked with chunked requests.
Thanks @Jackkoz -- [#2475](https://github.com/influxdb/influxdb/pull/2475): RLock server when checking if shard groups are required during write. -- [#2471](https://github.com/influxdb/influxdb/issues/2471): Function calls normalized to be lower case. Fixes percentile not working when called uppercase. Thanks @neonstalwart -- [#2281](https://github.com/influxdb/influxdb/issues/2281): Fix Bad Escape error when parsing regex - -## v0.9.0-rc28 [2015-04-27] - -### Features -- [#2410](https://github.com/influxdb/influxdb/pull/2410) Allow configuration of Raft timers -- [#2354](https://github.com/influxdb/influxdb/pull/2354) Wire up STDDEV. Thanks @neonstalwart! - -### Bugfixes -- [#2374](https://github.com/influxdb/influxdb/issues/2374): Two different panics during SELECT percentile -- [#2404](https://github.com/influxdb/influxdb/pull/2404): Mean and percentile function fixes -- [#2408](https://github.com/influxdb/influxdb/pull/2408): Fix snapshot 500 error -- [#1896](https://github.com/influxdb/influxdb/issues/1896): Excessive heartbeater logging of "connection refused" on cluster node stop -- [#2418](https://github.com/influxdb/influxdb/pull/2418): Fix raft node getting stuck in candidate state -- [#2415](https://github.com/influxdb/influxdb/pull/2415): Raft leader ID now set on election after failover. Thanks @xiaost -- [#2426](https://github.com/influxdb/influxdb/pull/2426): Fix race condition around listener address in openTSDB server. -- [#2426](https://github.com/influxdb/influxdb/pull/2426): Fix race condition around listener address in Graphite server. -- [#2429](https://github.com/influxdb/influxdb/pull/2429): Ensure no field value is null. -- [#2431](https://github.com/influxdb/influxdb/pull/2431): Always append shard path in diags. Thanks @marcosnils -- [#2441](https://github.com/influxdb/influxdb/pull/2441): Correctly release server RLock during "drop series". -- [#2445](https://github.com/influxdb/influxdb/pull/2445): Read locks and data race fixes - -## v0.9.0-rc27 [04-23-2015] - -### Features -- [#2398](https://github.com/influxdb/influxdb/pull/2398) Track more stats and report errors for shards. - -### Bugfixes -- [#2370](https://github.com/influxdb/influxdb/pull/2370): Fix data race in openTSDB endpoint. -- [#2371](https://github.com/influxdb/influxdb/pull/2371): Don't set client to nil when closing broker. Fixes #2352 -- [#2372](https://github.com/influxdb/influxdb/pull/2372): Fix data race in graphite endpoint. -- [#2373](https://github.com/influxdb/influxdb/pull/2373): Actually allow HTTP logging to be controlled. -- [#2376](https://github.com/influxdb/influxdb/pull/2376): Encode all types of integers. Thanks @jtakkala. -- [#2376](https://github.com/influxdb/influxdb/pull/2376): Add shard path to existing diags value. Fix issue #2369. -- [#2386](https://github.com/influxdb/influxdb/pull/2386): Fix shard datanodes stats getting appended too many times -- [#2393](https://github.com/influxdb/influxdb/pull/2393): Fix default hostname for connecting to cluster. -- [#2390](https://github.com/influxdb/influxdb/pull/2390): Handle large sums when calculating means - thanks @neonstalwart!

## v0.9.0-rc27 [2015-04-23]

### Features
- [#2398](https://github.com/influxdb/influxdb/pull/2398): Track more stats and report errors for shards.

### Bugfixes
- [#2370](https://github.com/influxdb/influxdb/pull/2370): Fix data race in OpenTSDB endpoint.
- [#2371](https://github.com/influxdb/influxdb/pull/2371): Don't set client to nil when closing broker. Fixes #2352
- [#2372](https://github.com/influxdb/influxdb/pull/2372): Fix data race in Graphite endpoint.
- [#2373](https://github.com/influxdb/influxdb/pull/2373): Actually allow HTTP logging to be controlled.
- [#2376](https://github.com/influxdb/influxdb/pull/2376): Encode all types of integers. Thanks @jtakkala.
- [#2376](https://github.com/influxdb/influxdb/pull/2376): Add shard path to existing diags value. Fixes issue #2369.
- [#2386](https://github.com/influxdb/influxdb/pull/2386): Fix shard datanodes stats getting appended too many times
- [#2393](https://github.com/influxdb/influxdb/pull/2393): Fix default hostname for connecting to cluster.
- [#2390](https://github.com/influxdb/influxdb/pull/2390): Handle large sums when calculating means - thanks @neonstalwart!
- [#2391](https://github.com/influxdb/influxdb/pull/2391): Unable to write points through Go client when authentication enabled
- [#2400](https://github.com/influxdb/influxdb/pull/2400): Always send auth headers for client requests if present

## v0.9.0-rc26 [2015-04-21]

### Features
- [#2301](https://github.com/influxdb/influxdb/pull/2301): Distributed query load balancing and failover
- [#2336](https://github.com/influxdb/influxdb/pull/2336): Handle distributed queries when shards != data nodes
- [#2353](https://github.com/influxdb/influxdb/pull/2353): Distributed query/clustering fixes

### Bugfixes
- [#2297](https://github.com/influxdb/influxdb/pull/2297): Create /var/run during startup. Thanks @neonstalwart.
- [#2312](https://github.com/influxdb/influxdb/pull/2312): Re-use httpclient for continuous queries
- [#2318](https://github.com/influxdb/influxdb/pull/2318): Remove pointless use of 'done' channel for collectd.
- [#2242](https://github.com/influxdb/influxdb/pull/2242): Distributed queries should balance requests
- [#2243](https://github.com/influxdb/influxdb/pull/2243): Use a limit reader instead of a fixed 1MB/1GB slice for distributed queries
- [#2190](https://github.com/influxdb/influxdb/pull/2190): Implement failover to other data nodes for distributed queries
- [#2324](https://github.com/influxdb/influxdb/issues/2324): Fix race in Broker.Close()/Broker.RunContinousQueryProcessing()
- [#2325](https://github.com/influxdb/influxdb/pull/2325): Cluster open fixes
- [#2326](https://github.com/influxdb/influxdb/pull/2326): Fix parse error in CREATE CONTINUOUS QUERY (see the sketch below)
- [#2300](https://github.com/influxdb/influxdb/pull/2300): Refactor integration tests. Properly close Graphite/OpenTSDB listeners.
- [#2338](https://github.com/influxdb/influxdb/pull/2338): Fix panic if a tag key isn't double quoted when it should have been
- [#2340](https://github.com/influxdb/influxdb/pull/2340): Fix SHOW DIAGNOSTICS panic if any shard was non-local.
- [#2351](https://github.com/influxdb/influxdb/pull/2351): Fix data race by rlocking shard during diagnostics.
- [#2348](https://github.com/influxdb/influxdb/pull/2348): Data node fails to join cluster in 0.9.0rc25
- [#2343](https://github.com/influxdb/influxdb/pull/2343): Node falls behind metastore updates
- [#2334](https://github.com/influxdb/influxdb/pull/2334): Test partial replication is very problematic
- [#2272](https://github.com/influxdb/influxdb/pull/2272): clustering: influxdb 0.9.0-rc23 panics when doing a GET with merge_metrics in a
- [#2350](https://github.com/influxdb/influxdb/pull/2350): Fix for `influxd -hostname localhost`.
- [#2367](https://github.com/influxdb/influxdb/pull/2367): PR for issue #2350 - always use localhost, not host name.
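
For context on [#2326] above, a minimal sketch of a continuous query statement of this era; the query, database, and measurement names are illustrative, and the exact grammar is assumed from 0.9-era InfluxQL:

```sql
CREATE CONTINUOUS QUERY cpu_mean_10m ON mydb BEGIN
  SELECT mean(value) INTO cpu_mean FROM cpu GROUP BY time(10m)
END
```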

## v0.9.0-rc25 [2015-04-15]

### Bugfixes
- [#2282](https://github.com/influxdb/influxdb/pull/2282): Use "value" as field name for OpenTSDB input.
- [#2283](https://github.com/influxdb/influxdb/pull/2283): Fix bug when restarting an entire existing cluster.
- [#2293](https://github.com/influxdb/influxdb/pull/2293): Open cluster listener before starting broker.
- [#2287](https://github.com/influxdb/influxdb/pull/2287): Fix data race during SHOW RETENTION POLICIES.
- [#2288](https://github.com/influxdb/influxdb/pull/2288): Fix expression parsing bug.
- [#2294](https://github.com/influxdb/influxdb/pull/2294): Fix async response flushing (invalid chunked response error).

### Features
- [#2276](https://github.com/influxdb/influxdb/pull/2276): Broker topic truncation.
- [#2292](https://github.com/influxdb/influxdb/pull/2292): Wire up drop CQ statement - thanks @neonstalwart!
- [#2290](https://github.com/influxdb/influxdb/pull/2290): Allow hostname argument to override default config - thanks @neonstalwart!
- [#2295](https://github.com/influxdb/influxdb/pull/2295): Use nil as default return value for MapCount - thanks @neonstalwart!
- [#2246](https://github.com/influxdb/influxdb/pull/2246): Allow HTTP logging to be controlled.

## v0.9.0-rc24 [2015-04-13]

### Bugfixes
- [#2255](https://github.com/influxdb/influxdb/pull/2255): Fix panic when changing default retention policy.
- [#2257](https://github.com/influxdb/influxdb/pull/2257): Add "snapshotting" pseudo state & log entry cache.
- [#2261](https://github.com/influxdb/influxdb/pull/2261): Support int64 value types.
- [#2191](https://github.com/influxdb/influxdb/pull/2191): Case-insensitive check for "fill"
- [#2274](https://github.com/influxdb/influxdb/pull/2274): Snapshot and HTTP API endpoints
- [#2265](https://github.com/influxdb/influxdb/pull/2265): Fix auth for CLI.

## v0.9.0-rc23 [2015-04-11]

### Features
- [#2202](https://github.com/influxdb/influxdb/pull/2202): Initial implementation of distributed queries
- [#2202](https://github.com/influxdb/influxdb/pull/2202): 64-bit series IDs. INCOMPATIBLE WITH PREVIOUS DATASTORES.

### Bugfixes
- [#2225](https://github.com/influxdb/influxdb/pull/2225): Make keywords completely case insensitive
- [#2228](https://github.com/influxdb/influxdb/pull/2228): Accept keyword default unquoted in ALTER RETENTION POLICY statement
- [#2236](https://github.com/influxdb/influxdb/pull/2236): Immediate term changes, fix stale write issue, net/http/pprof
- [#2213](https://github.com/influxdb/influxdb/pull/2213): Seed random number generator for election timeout. Thanks @cannium.

## v0.9.0-rc22 [2015-04-09]

### Features
- [#2214](https://github.com/influxdb/influxdb/pull/2214): Added the option to the influx CLI to execute a single command and exit. Thanks @n1tr0g

### Bugfixes
- [#2223](https://github.com/influxdb/influxdb/pull/2223): Always notify term change on RequestVote

## v0.9.0-rc21 [2015-04-09]

### Features
- [#870](https://github.com/influxdb/influxdb/pull/870): Add support for OpenTSDB telnet input protocol (see the sample line below). Thanks @tcolgate
- [#2180](https://github.com/influxdb/influxdb/pull/2180): Allow HTTP write handler to decode gzipped body
- [#2175](https://github.com/influxdb/influxdb/pull/2175): Separate broker and data nodes
- [#2158](https://github.com/influxdb/influxdb/pull/2158): Allow user password to be changed. Thanks @n1tr0g
- [#2201](https://github.com/influxdb/influxdb/pull/2201): Bring back config join URLs
- [#2121](https://github.com/influxdb/influxdb/pull/2121): Parser refactor

### Bugfixes
- [#2181](https://github.com/influxdb/influxdb/pull/2181): Fix panic on "SHOW DIAGNOSTICS".
- [#2170](https://github.com/influxdb/influxdb/pull/2170): Make sure queries on missing tags return 200 status.
- [#2197](https://github.com/influxdb/influxdb/pull/2197): Lock server during Open().
- [#2200](https://github.com/influxdb/influxdb/pull/2200): Re-enable continuous queries.
- [#2203](https://github.com/influxdb/influxdb/pull/2203): Fix race condition on continuous queries.
- [#2217](https://github.com/influxdb/influxdb/pull/2217): Only revert to follower if new term is greater.
- [#2219](https://github.com/influxdb/influxdb/pull/2219): Persist term change to disk when candidate. Thanks @cannium
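
For reference on [#870] above, the OpenTSDB telnet input accepts `put` lines of the following shape; the metric name, timestamp, value, and tags here are illustrative:

```
put sys.cpu.user 1428595200 42.5 host=web01 cpu=0
```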

## v0.9.0-rc20 [2015-04-04]

### Features
- [#2128](https://github.com/influxdb/influxdb/pull/2128): Data node discovery from brokers
- [#2142](https://github.com/influxdb/influxdb/pull/2142): Support chunked queries
- [#2154](https://github.com/influxdb/influxdb/pull/2154): Node redirection
- [#2168](https://github.com/influxdb/influxdb/pull/2168): Return raft term from vote, add term logging

### Bugfixes
- [#2147](https://github.com/influxdb/influxdb/pull/2147): Set Go max procs in a better location
- [#2137](https://github.com/influxdb/influxdb/pull/2137): Refactor `results` to `response`. Breaking Go client change.
- [#2151](https://github.com/influxdb/influxdb/pull/2151): Ignore replay commands on the metastore.
- [#2152](https://github.com/influxdb/influxdb/issues/2152): Influxd process with stats enabled crashing with 'Unsuported protocol scheme for ""'
- [#2156](https://github.com/influxdb/influxdb/pull/2156): Propagate error when resolving UDP address in Graphite UDP server.
- [#2163](https://github.com/influxdb/influxdb/pull/2163): Fix up paths for default data and run storage.
- [#2164](https://github.com/influxdb/influxdb/pull/2164): Append STDOUT/STDERR in initscript.
- [#2165](https://github.com/influxdb/influxdb/pull/2165): Better name for config section for stats and diags.
- [#2165](https://github.com/influxdb/influxdb/pull/2165): Monitoring database and retention policy are not configurable.
- [#2167](https://github.com/influxdb/influxdb/pull/2167): Add broker log recovery.
- [#2166](https://github.com/influxdb/influxdb/pull/2166): Don't panic if presented with a field of unknown type.
- [#2149](https://github.com/influxdb/influxdb/pull/2149): Fix unit tests for win32 when directory doesn't exist.
- [#2150](https://github.com/influxdb/influxdb/pull/2150): Fix unit tests for win32 when a connection is refused.

## v0.9.0-rc19 [2015-04-01]

### Features
- [#2143](https://github.com/influxdb/influxdb/pull/2143): Add raft term logging.

### Bugfixes
- [#2145](https://github.com/influxdb/influxdb/pull/2145): Encode toml durations correctly, which fixes default configuration generation via `influxd config`.

## v0.9.0-rc18 [2015-03-31]

### Bugfixes
- [#2100](https://github.com/influxdb/influxdb/pull/2100): Use channel to synchronize collectd shutdown.
- [#2100](https://github.com/influxdb/influxdb/pull/2100): Synchronize access to shard index.
- [#2131](https://github.com/influxdb/influxdb/pull/2131): Optimize marshalTags().
- [#2130](https://github.com/influxdb/influxdb/pull/2130): Make fewer calls to marshalTags().
- [#2105](https://github.com/influxdb/influxdb/pull/2105): Support != for tag values. Fixes issue #2097, thanks to @smonkewitz for the bug report.
- [#2105](https://github.com/influxdb/influxdb/pull/2105): Support !~ for tag values (examples below).
- [#2138](https://github.com/influxdb/influxdb/pull/2136): Use map for marshaledTags cache.
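
The `!=` and `!~` tag filters from [#2105] above look like this in practice; measurement, tag, and field names are illustrative:

```sql
-- exclude a single tag value
SELECT value FROM cpu WHERE host != 'server01'
-- exclude tag values matching a regex
SELECT value FROM cpu WHERE host !~ /^web.*/
```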

## v0.9.0-rc17 [2015-03-29]

### Features
- [#2076](https://github.com/influxdb/influxdb/pull/2076): Separate stdout and stderr output in init.d script
- [#2091](https://github.com/influxdb/influxdb/pull/2091): Support disabling snapshot endpoint.
- [#2081](https://github.com/influxdb/influxdb/pull/2081): Support writing diagnostic data into the internal database.
- [#2095](https://github.com/influxdb/influxdb/pull/2095): Improved InfluxDB client docs. Thanks @derailed

### Bugfixes
- [#2093](https://github.com/influxdb/influxdb/pull/2093): Point precision not marshalled correctly. Thanks @derailed
- [#2084](https://github.com/influxdb/influxdb/pull/2084): Allow leading underscores in identifiers.
- [#2080](https://github.com/influxdb/influxdb/pull/2080): Graphite logs in seconds, not milliseconds.
- [#2101](https://github.com/influxdb/influxdb/pull/2101): SHOW DATABASES should name returned series "databases".
- [#2104](https://github.com/influxdb/influxdb/pull/2104): Include NEQ when calculating field filters.
- [#2112](https://github.com/influxdb/influxdb/pull/2112): Set GOMAXPROCS on startup. This may have been causing extra leader elections, which would cause a number of other bugs or instability.
- [#2111](https://github.com/influxdb/influxdb/pull/2111) and [#2025](https://github.com/influxdb/influxdb/issues/2025): Raft stability fixes. Non-contiguous log error and others.
- [#2114](https://github.com/influxdb/influxdb/pull/2114): Correctly start influxd on platforms without start-stop-daemon.

## v0.9.0-rc16 [2015-03-24]

### Features
- [#2058](https://github.com/influxdb/influxdb/pull/2058): Track number of queries executed in stats.
- [#2059](https://github.com/influxdb/influxdb/pull/2059): Retention policies sorted by name on return to client.
- [#2061](https://github.com/influxdb/influxdb/pull/2061): Implement SHOW DIAGNOSTICS.
- [#2064](https://github.com/influxdb/influxdb/pull/2064): Allow init.d script to return influxd version.
- [#2053](https://github.com/influxdb/influxdb/pull/2053): Implement backup and restore.
- [#1631](https://github.com/influxdb/influxdb/pull/1631): Wire up DROP CONTINUOUS QUERY.

### Bugfixes
- [#2037](https://github.com/influxdb/influxdb/pull/2037): Don't check 'configExists' at Run() level.
- [#2039](https://github.com/influxdb/influxdb/pull/2039): Don't panic if getting current user fails.
- [#2034](https://github.com/influxdb/influxdb/pull/2034): GROUP BY should require an aggregate (see the sketch below).
- [#2040](https://github.com/influxdb/influxdb/pull/2040): Add missing top-level help for config command.
- [#2057](https://github.com/influxdb/influxdb/pull/2057): Move racy "in order" test to integration test suite.
- [#2060](https://github.com/influxdb/influxdb/pull/2060): Reload server shard map on restart.
- [#2068](https://github.com/influxdb/influxdb/pull/2068): Fix misspelled JSON field.
- [#2067](https://github.com/influxdb/influxdb/pull/2067): Fixed issue where some queries didn't properly pull back data (introduced in RC15). Fixes intervals for GROUP BY.
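
The GROUP BY rule from [#2034] above means a time-bucketed query must aggregate; a sketch with illustrative names:

```sql
-- accepted: GROUP BY time() with an aggregate
SELECT mean(value) FROM cpu WHERE time > now() - 1h GROUP BY time(10m)

-- rejected after #2034: a raw field with GROUP BY time() and no aggregate
-- SELECT value FROM cpu GROUP BY time(10m)
```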

## v0.9.0-rc15 [2015-03-19]

### Features
- [#2000](https://github.com/influxdb/influxdb/pull/2000): Log broker path when broker fails to start. Thanks @gst.
- [#2007](https://github.com/influxdb/influxdb/pull/2007): Track shard-level stats.

### Bugfixes
- [#2001](https://github.com/influxdb/influxdb/pull/2001): Ensure measurement not found returns status code 200.
- [#1985](https://github.com/influxdb/influxdb/pull/1985): Set content-type JSON header before actually writing header. Thanks @dstrek.
- [#2003](https://github.com/influxdb/influxdb/pull/2003): Set timestamp when writing monitoring stats.
- [#2004](https://github.com/influxdb/influxdb/pull/2004): Limit group by to MaxGroupByPoints (currently 100,000).
- [#2016](https://github.com/influxdb/influxdb/pull/2016): Fix bucket alignment for group by. Thanks @jnutzmann
- [#2021](https://github.com/influxdb/influxdb/pull/2021): Remove unnecessary formatting from log message. Thanks @simonkern

## v0.9.0-rc14 [2015-03-18]

### Bugfixes
- [#1999](https://github.com/influxdb/influxdb/pull/1999): Return status code 200 for measurement not found errors on show series.

## v0.9.0-rc13 [2015-03-17]

### Features
- [#1974](https://github.com/influxdb/influxdb/pull/1974): Add time taken for request to the http server logs.

### Bugfixes
- [#1971](https://github.com/influxdb/influxdb/pull/1971): Fix leader id initialization.
- [#1975](https://github.com/influxdb/influxdb/pull/1975): Require `q` parameter for query endpoint.
- [#1969](https://github.com/influxdb/influxdb/pull/1969): Print loaded config.
- [#1987](https://github.com/influxdb/influxdb/pull/1987): Fix config print startup statement for when no config is provided.
- [#1990](https://github.com/influxdb/influxdb/pull/1990): Drop measurement was taking too long due to transactions.

## v0.9.0-rc12 [2015-03-15]

### Bugfixes
- [#1942](https://github.com/influxdb/influxdb/pull/1942): Sort wildcard names.
- [#1957](https://github.com/influxdb/influxdb/pull/1957): Graphite numbers are always float64.
- [#1955](https://github.com/influxdb/influxdb/pull/1955): Prohibit creation of databases with no name. Thanks @dullgiulio
- [#1952](https://github.com/influxdb/influxdb/pull/1952): Handle delete statement with an error. Thanks again to @dullgiulio

### Features
- [#1935](https://github.com/influxdb/influxdb/pull/1935): Implement stateless broker for Raft.
- [#1936](https://github.com/influxdb/influxdb/pull/1936): Implement "SHOW STATS" and self-monitoring
- [#1909](https://github.com/influxdb/influxdb/pull/1909): Implement a dump command.

## v0.9.0-rc11 [2015-03-13]

### Bugfixes
- [#1917](https://github.com/influxdb/influxdb/pull/1902): Creating infinite retention policy failed.
- [#1758](https://github.com/influxdb/influxdb/pull/1758): Add Graphite integration test.
- [#1929](https://github.com/influxdb/influxdb/pull/1929): Default retention policy incorrectly auto created.
- [#1930](https://github.com/influxdb/influxdb/pull/1930): Auto create database for Graphite if not specified.
- [#1908](https://github.com/influxdb/influxdb/pull/1908): Cosmetic CLI output fixes.
- [#1931](https://github.com/influxdb/influxdb/pull/1931): Add default column to SHOW RETENTION POLICIES.
- [#1937](https://github.com/influxdb/influxdb/pull/1937): OFFSET should be allowed to be 0.

### Features
- [#1902](https://github.com/influxdb/influxdb/pull/1902): Enforce retention policies to have a minimum duration.
- [#1906](https://github.com/influxdb/influxdb/pull/1906): Add show servers to query language.
- [#1925](https://github.com/influxdb/influxdb/pull/1925): Add `fill(none)`, `fill(previous)`, and `fill()` to queries (examples below).
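
The `fill()` variants from [#1925] above control what empty GROUP BY buckets return; names are illustrative:

```sql
SELECT mean(value) FROM cpu WHERE time > now() - 1h GROUP BY time(10m) fill(0)        -- emit a constant
SELECT mean(value) FROM cpu WHERE time > now() - 1h GROUP BY time(10m) fill(none)     -- omit empty buckets
SELECT mean(value) FROM cpu WHERE time > now() - 1h GROUP BY time(10m) fill(previous) -- repeat the last value
```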

## v0.9.0-rc10 [2015-03-09]

### Bugfixes
- [#1867](https://github.com/influxdb/influxdb/pull/1867): Fix race accessing topic replicas map
- [#1864](https://github.com/influxdb/influxdb/pull/1864): Fix race in startStateLoop
- [#1753](https://github.com/influxdb/influxdb/pull/1874): Do not panic on missing dirs
- [#1877](https://github.com/influxdb/influxdb/pull/1877): Broker clients track broker leader
- [#1862](https://github.com/influxdb/influxdb/pull/1862): Fix memory leak in `httpd.serveWait`. Thanks @mountkin
- [#1883](https://github.com/influxdb/influxdb/pull/1883): RLock server during retention policy enforcement. Thanks @grisha
- [#1868](https://github.com/influxdb/influxdb/pull/1868): Use `BatchPoints` for `client.Write` method. Thanks @vladlopes, @georgmu, @d2g, @evanphx, @akolosov.
- [#1881](https://github.com/influxdb/influxdb/pull/1881): Update documentation for `client` package. Misc library tweaks.
- Fix queries with multiple where clauses on tags, times and fields. Fix queries that have where clauses on fields not in the select.

### Features
- [#1875](https://github.com/influxdb/influxdb/pull/1875): Support trace logging of Raft.
- [#1895](https://github.com/influxdb/influxdb/pull/1895): Auto-create a retention policy when a database is created.
- [#1897](https://github.com/influxdb/influxdb/pull/1897): Pre-create shard groups.
- [#1900](https://github.com/influxdb/influxdb/pull/1900): Change `LIMIT` to `SLIMIT` and implement `LIMIT` and `OFFSET` (see the sketch below)
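
A sketch of the `SLIMIT`/`LIMIT` split from [#1900] above, with illustrative names; the syntax is assumed from the 0.9 query language, where `SLIMIT` caps the number of series returned and `LIMIT` caps points:

```sql
-- return at most 3 series, one per tag set
SELECT value FROM cpu GROUP BY * SLIMIT 3
-- return at most 100 points per series
SELECT value FROM cpu WHERE time > now() - 1h LIMIT 100
```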

## v0.9.0-rc9 [2015-03-06]

### Bugfixes
- [#1872](https://github.com/influxdb/influxdb/pull/1872): Fix "stale term" errors with raft

## v0.9.0-rc8 [2015-03-05]

### Bugfixes
- [#1836](https://github.com/influxdb/influxdb/pull/1836): Store each parsed shell command in history file.
- [#1789](https://github.com/influxdb/influxdb/pull/1789): Add --config-files option to fpm command. Thanks @kylezh
- [#1859](https://github.com/influxdb/influxdb/pull/1859): Queries with a `GROUP BY *` clause were returning a 500 if run against a measurement that didn't exist

### Features
- [#1755](https://github.com/influxdb/influxdb/pull/1848): Support JSON data ingest over UDP
- [#1857](https://github.com/influxdb/influxdb/pull/1857): Support retention policies with infinite duration
- [#1858](https://github.com/influxdb/influxdb/pull/1858): Enable detailed tracing of write path

## v0.9.0-rc7 [2015-03-02]

### Features
- [#1813](https://github.com/influxdb/influxdb/pull/1813): Queries for missing measurements or fields now return a 200 with an error message in the series JSON.
- [#1826](https://github.com/influxdb/influxdb/pull/1826), [#1827](https://github.com/influxdb/influxdb/pull/1827): Fixed queries with `WHERE` clauses against fields.

### Bugfixes

- [#1744](https://github.com/influxdb/influxdb/pull/1744): Allow retention policies to be modified without specifying replication factor. Thanks @kylezh
- [#1809](https://github.com/influxdb/influxdb/pull/1809): Packaging post-install script unconditionally removes init.d symlink. Thanks @sineos

## v0.9.0-rc6 [2015-02-27]

### Bugfixes

- [#1780](https://github.com/influxdb/influxdb/pull/1780): Malformed identifiers get through the parser
- [#1775](https://github.com/influxdb/influxdb/pull/1775): Panic "index out of range" on some queries
- [#1744](https://github.com/influxdb/influxdb/pull/1744): Select shard groups which completely encompass time range. Thanks @kylezh.

## v0.9.0-rc5 [2015-02-27]

### Bugfixes

- [#1752](https://github.com/influxdb/influxdb/pull/1752): Remove debug log output from collectd.
- [#1720](https://github.com/influxdb/influxdb/pull/1720): Parse series IDs as unsigned 32-bit integers.
- [#1767](https://github.com/influxdb/influxdb/pull/1767): Drop series was failing across shards. Issue #1761.
- [#1773](https://github.com/influxdb/influxdb/pull/1773): Fix bug when merging series together that have an unequal number of points in a group by interval
- [#1771](https://github.com/influxdb/influxdb/pull/1771): Make `SHOW SERIES` return IDs and support `LIMIT` and `OFFSET`

### Features

- [#1698](https://github.com/influxdb/influxdb/pull/1698): Wire up DROP MEASUREMENT

## v0.9.0-rc4 [2015-02-24]

### Bugfixes

- Fix authentication issue with continuous queries
- Print version in the log on startup

## v0.9.0-rc3 [2015-02-23]

### Features

- [#1659](https://github.com/influxdb/influxdb/pull/1659): WHERE against regexes: `WHERE =~ '.*asdf'`
- [#1580](https://github.com/influxdb/influxdb/pull/1580): Add support for fields with bool, int, or string data types
- [#1687](https://github.com/influxdb/influxdb/pull/1687): Change `Rows` to `Series` in results output. BREAKING API CHANGE
- [#1629](https://github.com/influxdb/influxdb/pull/1629): Add support for `DROP SERIES` queries
- [#1632](https://github.com/influxdb/influxdb/pull/1632): Add support for `GROUP BY *` to return all series within a measurement
- [#1689](https://github.com/influxdb/influxdb/pull/1689): Change `SHOW TAG VALUES WITH KEY="foo"` to use the key name in the result. BREAKING API CHANGE
- [#1699](https://github.com/influxdb/influxdb/pull/1699): Add CPU and memory profiling options to daemon
- [#1672](https://github.com/influxdb/influxdb/pull/1672): Add index tracking to metastore. Makes downed node recovery actually work
- [#1591](https://github.com/influxdb/influxdb/pull/1591): Add `spread` aggregate function
- [#1576](https://github.com/influxdb/influxdb/pull/1576): Add `first` and `last` aggregate functions
- [#1573](https://github.com/influxdb/influxdb/pull/1573): Add `stddev` aggregate function
- [#1565](https://github.com/influxdb/influxdb/pull/1565): Add the admin interface back into the server and update for new API
- [#1562](https://github.com/influxdb/influxdb/pull/1562): Enforce retention policies
- [#1700](https://github.com/influxdb/influxdb/pull/1700): Change `Values` to `Fields` on writes. BREAKING API CHANGE
- [#1706](https://github.com/influxdb/influxdb/pull/1706): Add support for `LIMIT` and `OFFSET`, which work on the number of series returned in a query. To limit the number of data points use a `WHERE time` clause

### Bugfixes

- [#1636](https://github.com/influxdb/influxdb/issues/1636): Don't store number of fields in raw data. THIS IS A BREAKING DATA CHANGE. YOU MUST START WITH A FRESH DATABASE
- [#1701](https://github.com/influxdb/influxdb/pull/1701), [#1667](https://github.com/influxdb/influxdb/pull/1667), [#1663](https://github.com/influxdb/influxdb/pull/1663), [#1615](https://github.com/influxdb/influxdb/pull/1615): Raft fixes
- [#1644](https://github.com/influxdb/influxdb/pull/1644): Add batching support for significantly improved write performance
- [#1704](https://github.com/influxdb/influxdb/pull/1704): Fix queries that pull back raw data (i.e. ones without aggregate functions)
- [#1718](https://github.com/influxdb/influxdb/pull/1718): Return an error on write if any of the points don't have at least one field
- [#1806](https://github.com/influxdb/influxdb/pull/1806): Fix regex parsing. Change regex syntax to use / delimiters (see the example below).
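
Combining [#1659] and [#1806] above, regex matching in WHERE clauses ends up with `/`-delimited literals; the names are illustrative:

```sql
SELECT value FROM cpu WHERE host =~ /.*asdf/
```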

## v0.9.0-rc1,2 [no public release]

### Features

- Support for tags added
- New queries for showing measurement names, tag keys, and tag values
- Renamed shard spaces to retention policies
- Deprecated matching against regex in favor of explicit writing and querying on retention policies
- Pure Go InfluxQL parser
- Switch to BoltDB as underlying datastore
- BoltDB backed metastore to store schema information
- Updated HTTP API to only have two endpoints, `/query` and `/write`
- Added all administrative functions to the query language
- Change cluster architecture to have brokers and data nodes
- Switch to streaming Raft implementation
- In-memory inverted index of the tag data
- Pure Go implementation!

## v0.8.6 [2014-11-15]

### Features

- [Issue #973](https://github.com/influxdb/influxdb/issues/973). Support joining using a regex or list of time series
- [Issue #1068](https://github.com/influxdb/influxdb/issues/1068). Print the processor chain when the query is started

### Bugfixes

- [Issue #584](https://github.com/influxdb/influxdb/issues/584). Don't panic if the process died while initializing
- [Issue #663](https://github.com/influxdb/influxdb/issues/663). Make sure all sub-services are closed when stopping InfluxDB
- [Issue #671](https://github.com/influxdb/influxdb/issues/671). Fix the Makefile package target for Mac OSX
- [Issue #800](https://github.com/influxdb/influxdb/issues/800). Use su instead of sudo in the init script. This fixes the startup problem on RHEL 6.
- [Issue #925](https://github.com/influxdb/influxdb/issues/925). Don't generate invalid query strings for single point queries
- [Issue #943](https://github.com/influxdb/influxdb/issues/943). Don't take two snapshots at the same time
- [Issue #947](https://github.com/influxdb/influxdb/issues/947). Exit nicely if the daemon doesn't have permission to write to the log.
- [Issue #959](https://github.com/influxdb/influxdb/issues/959). Stop using closed connections in the protobuf client.
- [Issue #978](https://github.com/influxdb/influxdb/issues/978). Check for valgrind and mercurial in the configure script
- [Issue #996](https://github.com/influxdb/influxdb/issues/996). Fill should fill the time range even if no points exist in the given time range
- [Issue #1008](https://github.com/influxdb/influxdb/issues/1008). Return an appropriate exit status code depending on whether the process exits due to an error or exits gracefully.
- [Issue #1024](https://github.com/influxdb/influxdb/issues/1024). Hitting open files limit causes influxdb to create shards in a loop.
- [Issue #1069](https://github.com/influxdb/influxdb/issues/1069). Fix deprecated interface endpoint in Admin UI.
- [Issue #1076](https://github.com/influxdb/influxdb/issues/1076). Fix the timestamps of data points written by the collectd plugin. (Thanks, @renchap for reporting this bug)
- [Issue #1078](https://github.com/influxdb/influxdb/issues/1078). Make sure we don't resurrect shard directories for shards that have already expired
- [Issue #1085](https://github.com/influxdb/influxdb/issues/1085). Set the connection string of the local raft node
- [Issue #1092](https://github.com/influxdb/influxdb/issues/1093). Set the connection string of the local node in the raft snapshot.
- [Issue #1100](https://github.com/influxdb/influxdb/issues/1100). Removing a non-existent shard space causes the cluster to panic.
- [Issue #1113](https://github.com/influxdb/influxdb/issues/1113). A nil engine.ProcessorChain causes a panic.

## v0.8.5 [2014-10-27]

### Features

- [Issue #1055](https://github.com/influxdb/influxdb/issues/1055). Allow graphite and collectd input plugins to have separate binding address

### Bugfixes

- [Issue #1058](https://github.com/influxdb/influxdb/issues/1058). Use the query language instead of the continuous query endpoints that were removed in 0.8.4
- [Issue #1022](https://github.com/influxdb/influxdb/issues/1022). Return +Inf or NaN instead of panicking when we encounter a divide by zero
- [Issue #821](https://github.com/influxdb/influxdb/issues/821). Don't scan through points when we hit the limit
- [Issue #1051](https://github.com/influxdb/influxdb/issues/1051). Fix timestamps when collectd is used and low-resolution timestamps are set.

## v0.8.4 [2014-10-24]

### Bugfixes

- Remove the continuous query api endpoints since the query language has all the features needed to list and delete continuous queries.
- [Issue #778](https://github.com/influxdb/influxdb/issues/778). Selecting from a non-existent series should give a better error message indicating that the series doesn't exist
- [Issue #988](https://github.com/influxdb/influxdb/issues/988). Check the arguments of `top()` and `bottom()`
- [Issue #1021](https://github.com/influxdb/influxdb/issues/1021). Make redirecting to standard output and standard error optional instead of going to `/dev/null`. This can now be configured by setting `$STDOUT` in `/etc/default/influxdb`
- [Issue #985](https://github.com/influxdb/influxdb/issues/985). Make sure we drop a shard only when there's no one using it. Otherwise, the shard can be closed when another goroutine is writing to it, which will cause random errors and possibly corruption of the database.

### Features

- [Issue #1047](https://github.com/influxdb/influxdb/issues/1047). Allow merge() to take a list of series (as opposed to a regex in #72)

## v0.8.4-rc.1 [2014-10-21]

### Bugfixes

- [Issue #1040](https://github.com/influxdb/influxdb/issues/1040). Revert to older raft snapshot if the latest one is corrupted
- [Issue #1004](https://github.com/influxdb/influxdb/issues/1004). Querying for data outside of existing shards returns an empty response instead of throwing a `Couldn't lookup columns` error
- [Issue #1020](https://github.com/influxdb/influxdb/issues/1020). Change init script exit codes to conform to the lsb standards. (Thanks, @spuder)
- [Issue #1011](https://github.com/influxdb/influxdb/issues/1011). Fix the tarball for homebrew so that rocksdb is included and the directory structure is clean
- [Issue #1007](https://github.com/influxdb/influxdb/issues/1007). Fix the content type when an error occurs and the client requests compression.
- [Issue #916](https://github.com/influxdb/influxdb/issues/916). Set the ulimit in the init script with a way to override the limit
- [Issue #742](https://github.com/influxdb/influxdb/issues/742). Fix rocksdb for Mac OSX
- [Issue #387](https://github.com/influxdb/influxdb/issues/387). Aggregations with group by time(1w), time(1m) and time(1y) (for week, month and year respectively) will cause the start time and end time of the bucket to fall on the logical boundaries of the week, month or year.
- [Issue #334](https://github.com/influxdb/influxdb/issues/334). Derivative for queries with group by time() and fill() will take the difference between the first value in the bucket and the first value of the next bucket.
- [Issue #972](https://github.com/influxdb/influxdb/issues/972). Don't assign duplicate server ids

### Features

- [Issue #722](https://github.com/influxdb/influxdb/issues/722). Add an install target to the Makefile
- [Issue #1032](https://github.com/influxdb/influxdb/issues/1032). Include the admin ui static assets in the binary
- [Issue #1019](https://github.com/influxdb/influxdb/issues/1019). Upgrade to rocksdb 3.5.1
- [Issue #992](https://github.com/influxdb/influxdb/issues/992). Add an input plugin for collectd. (Thanks, @kimor79)
- [Issue #72](https://github.com/influxdb/influxdb/issues/72). Support merge for multiple series using regex syntax

## v0.8.3 [2014-09-24]

### Bugfixes

- [Issue #885](https://github.com/influxdb/influxdb/issues/885). Multiple queries separated by semicolons work as expected. Queries are processed sequentially
- [Issue #652](https://github.com/influxdb/influxdb/issues/652). Return an error if an invalid column is used in the where clause
- [Issue #794](https://github.com/influxdb/influxdb/issues/794). Fix case insensitive regex matching
- [Issue #853](https://github.com/influxdb/influxdb/issues/853). Move cluster config from raft to API.
- [Issue #714](https://github.com/influxdb/influxdb/issues/714). Don't panic on invalid boolean operators.
- [Issue #843](https://github.com/influxdb/influxdb/issues/843). Prevent blank database names
- [Issue #780](https://github.com/influxdb/influxdb/issues/780). Fix fill() for all aggregators
- [Issue #923](https://github.com/influxdb/influxdb/issues/923). Enclose table names in double quotes in the result of GetQueryString()
- [Issue #967](https://github.com/influxdb/influxdb/issues/967). Return an error if the storage engine can't be created
- [Issue #954](https://github.com/influxdb/influxdb/issues/954). Don't automatically create shards, which was causing too many shards to be created when used with grafana
- [Issue #939](https://github.com/influxdb/influxdb/issues/939). Aggregation should ignore null values and invalid values, e.g. strings with mean().
- [Issue #964](https://github.com/influxdb/influxdb/issues/964). Parse big int in queries properly.

## v0.8.2 [2014-09-05]

### Bugfixes

- [Issue #886](https://github.com/influxdb/influxdb/issues/886). Update shard space to not set defaults
- [Issue #867](https://github.com/influxdb/influxdb/issues/867). Add option to return shard space mappings in list series
- [Issue #652](https://github.com/influxdb/influxdb/issues/652). Return a meaningful error if an invalid column is used in where clause after joining multiple series

## v0.8.2 [2014-09-08]

### Features

- Added API endpoint to update shard space definitions

### Bugfixes

- [Issue #886](https://github.com/influxdb/influxdb/issues/886). Shard space regexes reset after restart of InfluxDB

## v0.8.1 [2014-09-03]

### Features

- [Issue #896](https://github.com/influxdb/influxdb/issues/896). Allow logging to syslog. Thanks @malthe

### Bugfixes

- [Issue #868](https://github.com/influxdb/influxdb/issues/868). Don't panic when upgrading a snapshot from 0.7.x
- [Issue #887](https://github.com/influxdb/influxdb/issues/887). The first continuous query shouldn't trigger backfill if it had backfill disabled
- [Issue #674](https://github.com/influxdb/influxdb/issues/674). Graceful exit when config file is invalid. (Thanks, @DavidBord)
- [Issue #857](https://github.com/influxdb/influxdb/issues/857). More informative list servers api. (Thanks, @oliveagle)

## v0.8.0 [2014-08-22]

### Features

- [Issue #850](https://github.com/influxdb/influxdb/issues/850). Makes the server listing more informative

### Bugfixes

- [Issue #779](https://github.com/influxdb/influxdb/issues/779). Deleting expired shards isn't thread safe.
- [Issue #860](https://github.com/influxdb/influxdb/issues/860). Load database config should validate shard spaces.
- [Issue #862](https://github.com/influxdb/influxdb/issues/862). Data migrator should have option to set delay time.

## v0.8.0-rc.5 [2014-08-15]

### Features

- [Issue #376](https://github.com/influxdb/influxdb/issues/376). List series should support regex filtering
- [Issue #745](https://github.com/influxdb/influxdb/issues/745). Add continuous queries to the database config
- [Issue #746](https://github.com/influxdb/influxdb/issues/746). Add data migration tool for 0.8.0

### Bugfixes

- [Issue #426](https://github.com/influxdb/influxdb/issues/426). Fill should fill the entire time range that is requested
- [Issue #740](https://github.com/influxdb/influxdb/issues/740). Don't emit non-existent fields when joining series with different fields
- [Issue #744](https://github.com/influxdb/influxdb/issues/744). Admin site should have all assets locally
- [Issue #767](https://github.com/influxdb/influxdb/issues/768). Remove shards whenever they expire
- [Issue #781](https://github.com/influxdb/influxdb/issues/781). Don't emit non-existent fields when joining series with different fields
- [Issue #791](https://github.com/influxdb/influxdb/issues/791). Move database config loader to be an API endpoint
- [Issue #809](https://github.com/influxdb/influxdb/issues/809). Migration path from 0.7 -> 0.8
- [Issue #811](https://github.com/influxdb/influxdb/issues/811). Gogoprotobuf removed `ErrWrongType`, which is depended on by Raft
- [Issue #820](https://github.com/influxdb/influxdb/issues/820). Query non-local shard with time range to avoid getting back points not in time range
- [Issue #827](https://github.com/influxdb/influxdb/issues/827). Don't leak file descriptors in the WAL
- [Issue #830](https://github.com/influxdb/influxdb/issues/830). List series should return series in lexicographic sorted order
- [Issue #831](https://github.com/influxdb/influxdb/issues/831). Move create shard space to be db specific

## v0.8.0-rc.4 [2014-07-29]

### Bugfixes

- [Issue #774](https://github.com/influxdb/influxdb/issues/774). Don't try to parse "inf" shard retention policy
- [Issue #769](https://github.com/influxdb/influxdb/issues/769). Use retention duration when determining expired shards. (Thanks, @shugo)
- [Issue #736](https://github.com/influxdb/influxdb/issues/736). Only db admins should be able to drop a series
- [Issue #713](https://github.com/influxdb/influxdb/issues/713). Null should be a valid fill value
- [Issue #644](https://github.com/influxdb/influxdb/issues/644). Graphite api should write data in batches to the coordinator
- [Issue #740](https://github.com/influxdb/influxdb/issues/740). Panic when distinct fields are selected from an inner join
- [Issue #781](https://github.com/influxdb/influxdb/issues/781). Panic when distinct fields are added after an inner join

## v0.8.0-rc.3 [2014-07-21]

### Bugfixes

- [Issue #752](https://github.com/influxdb/influxdb/issues/752). `./configure` should use goroot to find gofmt
- [Issue #758](https://github.com/influxdb/influxdb/issues/758). Clarify the reason behind graphite input plugin not starting. (Thanks, @otoolep)
- [Issue #759](https://github.com/influxdb/influxdb/issues/759). Don't revert the regex in the shard space. (Thanks, @shugo)
- [Issue #760](https://github.com/influxdb/influxdb/issues/760). Removing a server should remove it from the shard server ids. (Thanks, @shugo)
- [Issue #772](https://github.com/influxdb/influxdb/issues/772). Add sentinel values to all db. This caused the last key in the db to not be fetched properly.

## v0.8.0-rc.2 [2014-07-15]

- This release is to fix a build error in rc1 which caused rocksdb to not be available
- Bump up the `max-open-files` option to 1000 on all storage engines
- Lower the `write-buffer-size` to 1000

## v0.8.0-rc.1 [2014-07-15]

### Features

- [Issue #643](https://github.com/influxdb/influxdb/issues/643). Support pretty print json. (Thanks, @otoolep)
- [Issue #641](https://github.com/influxdb/influxdb/issues/641). Support multiple storage engines
- [Issue #665](https://github.com/influxdb/influxdb/issues/665). Make build tmp directory configurable in the make file. (Thanks, @dgnorton)
- [Issue #667](https://github.com/influxdb/influxdb/issues/667). Enable compression on all GET requests and when writing data
- [Issue #648](https://github.com/influxdb/influxdb/issues/648). Return permissions when listing db users. (Thanks, @nicolai86)
- [Issue #682](https://github.com/influxdb/influxdb/issues/682). Allow continuous queries to run without backfill (Thanks, @dhammika)
- [Issue #689](https://github.com/influxdb/influxdb/issues/689). **REQUIRES DATA MIGRATION** Move metadata into raft
- [Issue #255](https://github.com/influxdb/influxdb/issues/255). Support millisecond precision using `ms` suffix
- [Issue #95](https://github.com/influxdb/influxdb/issues/95). Drop database should not be synchronous
- [Issue #571](https://github.com/influxdb/influxdb/issues/571). Add support for arbitrary number of shard spaces and retention policies
- Default storage engine changed to RocksDB

### Bugfixes

- [Issue #651](https://github.com/influxdb/influxdb/issues/651). Change permissions of symlink which fix some installation issues. (Thanks, @Dieterbe)
- [Issue #670](https://github.com/influxdb/influxdb/issues/670). Don't warn on missing influxdb user on fresh installs
- [Issue #676](https://github.com/influxdb/influxdb/issues/676). Allow storing high precision integer values without losing any information
- [Issue #695](https://github.com/influxdb/influxdb/issues/695). Prevent having duplicate field names in the write payload. (Thanks, @seunglee150)
- [Issue #731](https://github.com/influxdb/influxdb/issues/731). Don't enable the udp plugin if the `enabled` option is set to false
- [Issue #733](https://github.com/influxdb/influxdb/issues/733). Print an `INFO` message when the input plugin is disabled
- [Issue #707](https://github.com/influxdb/influxdb/issues/707). Graphite input plugin should accept payloads delimited by any whitespace character
- [Issue #734](https://github.com/influxdb/influxdb/issues/734). Don't buffer non-replicated writes
- [Issue #465](https://github.com/influxdb/influxdb/issues/465). Recreating a currently deleting db or series doesn't bring back the old data anymore
- [Issue #358](https://github.com/influxdb/influxdb/issues/358). **BREAKING** List series should return as a single series
- [Issue #499](https://github.com/influxdb/influxdb/issues/499). **BREAKING** Querying a non-existent database or series will return an error
- [Issue #570](https://github.com/influxdb/influxdb/issues/570). InfluxDB crashes during delete/drop of database
- [Issue #592](https://github.com/influxdb/influxdb/issues/592). Drop series is inefficient

## v0.7.3 [2014-06-13]

### Bugfixes

- [Issue #637](https://github.com/influxdb/influxdb/issues/637). Truncate log files if the last request wasn't written properly
- [Issue #646](https://github.com/influxdb/influxdb/issues/646). CRITICAL: Duplicate shard ids for new shards if old shards are deleted.

## v0.7.2 [2014-05-30]

### Features

- [Issue #521](https://github.com/influxdb/influxdb/issues/521). MODE works on all datatypes (Thanks, @richthegeek)

### Bugfixes

- [Issue #418](https://github.com/influxdb/influxdb/pull/418). Requests or responses larger than MAX_REQUEST_SIZE break things.
- [Issue #606](https://github.com/influxdb/influxdb/issues/606). InfluxDB will fail to start with invalid permission if log.txt didn't exist
- [Issue #602](https://github.com/influxdb/influxdb/issues/602). Merge will fail to work across shards

## v0.7.1 [2014-05-29]

### Bugfixes

- [Issue #579](https://github.com/influxdb/influxdb/issues/579). Reject writes to nonexistent databases
- [Issue #597](https://github.com/influxdb/influxdb/issues/597). Force compaction after deleting data

### Features

- [Issue #476](https://github.com/influxdb/influxdb/issues/476). Support ARM architecture
- [Issue #578](https://github.com/influxdb/influxdb/issues/578). Support aliasing for expressions in parentheses
- [Issue #544](https://github.com/influxdb/influxdb/pull/544). Support forcing node removal from a cluster
- [Issue #591](https://github.com/influxdb/influxdb/pull/591). Support multiple udp input plugins (Thanks, @tpitale)
- [Issue #600](https://github.com/influxdb/influxdb/pull/600). Report version, os, arch, and raftName once per day.

## v0.7.0 [2014-05-23]

### Bugfixes

- [Issue #557](https://github.com/influxdb/influxdb/issues/557). Group by time(1y) doesn't work while time(365d) works
- [Issue #547](https://github.com/influxdb/influxdb/issues/547). Add difference function (Thanks, @mboelstra)
- [Issue #550](https://github.com/influxdb/influxdb/issues/550). Fix tests on 32-bit ARM
- [Issue #524](https://github.com/influxdb/influxdb/issues/524). Arithmetic operators and where conditions don't play nice together
- [Issue #561](https://github.com/influxdb/influxdb/issues/561). Fix missing query in parsing errors
- [Issue #563](https://github.com/influxdb/influxdb/issues/563). Add sample config for graphite over udp
- [Issue #537](https://github.com/influxdb/influxdb/issues/537). Incorrect query syntax causes internal error
- [Issue #565](https://github.com/influxdb/influxdb/issues/565). Empty series names shouldn't cause a panic
- [Issue #575](https://github.com/influxdb/influxdb/issues/575). Single point select doesn't interpret timestamps correctly
- [Issue #576](https://github.com/influxdb/influxdb/issues/576). We shouldn't set timestamps and sequence numbers when listing cq
- [Issue #560](https://github.com/influxdb/influxdb/issues/560). Use /dev/urandom instead of /dev/random
- [Issue #502](https://github.com/influxdb/influxdb/issues/502). Fix a race condition in assigning id to db+series+field (Thanks @ohurvitz for reporting this bug and providing a script to repro)

### Features

- [Issue #567](https://github.com/influxdb/influxdb/issues/567). Allow selecting from multiple series names by separating them with commas (Thanks, @peekeri)

### Deprecated

- [Issue #460](https://github.com/influxdb/influxdb/issues/460). Don't start automatically after installing
- [Issue #529](https://github.com/influxdb/influxdb/issues/529). Don't run influxdb as root
- [Issue #443](https://github.com/influxdb/influxdb/issues/443). Use `name` instead of `username` when returning cluster admins

## v0.6.5 [2014-05-19]

### Features

- [Issue #551](https://github.com/influxdb/influxdb/issues/551). Add TOP and BOTTOM aggregate functions (Thanks, @chobie)

### Bugfixes

- [Issue #555](https://github.com/influxdb/influxdb/issues/555). Fix a regression introduced in the raft snapshot format

## v0.6.4 [2014-05-16]

### Features

- Make the write batch size configurable (also applies to deletes)
- Optimize writing to multiple series
- [Issue #546](https://github.com/influxdb/influxdb/issues/546). Add UDP support for Graphite API (Thanks, @peekeri)

### Bugfixes

- Fix a bug in shard logic that caused short term shards to be clobbered with long term shards
- [Issue #489](https://github.com/influxdb/influxdb/issues/489). Remove replication factor from CreateDatabase command

## v0.6.3 [2014-05-13]

### Features

- [Issue #505](https://github.com/influxdb/influxdb/issues/505). Return a version header with the http response (Thanks, @majst01)
- [Issue #520](https://github.com/influxdb/influxdb/issues/520). Print the version to the log file

### Bugfixes

- [Issue #516](https://github.com/influxdb/influxdb/issues/516). Close WAL log/index files when they aren't being used
- [Issue #532](https://github.com/influxdb/influxdb/issues/532). Don't log graphite connection EOF as an error
- [Issue #535](https://github.com/influxdb/influxdb/issues/535). WAL Replay hangs if response isn't received
- [Issue #538](https://github.com/influxdb/influxdb/issues/538). Don't panic if the same series existed twice in the request with different columns
- [Issue #536](https://github.com/influxdb/influxdb/issues/536). Joining the cluster after shards are created shouldn't cause new nodes to panic
- [Issue #539](https://github.com/influxdb/influxdb/issues/539). count(distinct()) with fill shouldn't panic on empty groups
- [Issue #534](https://github.com/influxdb/influxdb/issues/534). Create a new series when interpolating

## v0.6.2 [2014-05-09]

### Bugfixes

- [Issue #511](https://github.com/influxdb/influxdb/issues/511). Don't automatically create the database when a db user is created
- [Issue #512](https://github.com/influxdb/influxdb/issues/512). Group by should respect null values
- [Issue #518](https://github.com/influxdb/influxdb/issues/518). Filter Infinities and NaNs from the returned json
- [Issue #522](https://github.com/influxdb/influxdb/issues/522). Committing requests while replaying caused the WAL to skip some log files
- [Issue #369](https://github.com/influxdb/influxdb/issues/369). Fix some edge cases with WAL recovery

## v0.6.1 [2014-05-06]

### Bugfixes

- [Issue #500](https://github.com/influxdb/influxdb/issues/500). Support `y` suffix in time durations
- [Issue #501](https://github.com/influxdb/influxdb/issues/501). Writes with invalid payload should be rejected
- [Issue #507](https://github.com/influxdb/influxdb/issues/507). New cluster admin passwords don't propagate properly to other nodes in a cluster
- [Issue #508](https://github.com/influxdb/influxdb/issues/508). Don't replay WAL entries for servers with no shards
- [Issue #464](https://github.com/influxdb/influxdb/issues/464). Admin UI shouldn't draw graphs for string columns
- [Issue #480](https://github.com/influxdb/influxdb/issues/480). Large values on the y-axis get cut off

## v0.6.0 [2014-05-02]

### Features

- [Issue #477](https://github.com/influxdb/influxdb/issues/477). Add a udp json interface (Thanks, Julien Ammous)
- [Issue #491](https://github.com/influxdb/influxdb/issues/491). Make initial root password settable through env variable (Thanks, Edward Muller)

### Bugfixes

- [Issue #469](https://github.com/influxdb/influxdb/issues/469). Drop continuous queries when a database is dropped
- [Issue #431](https://github.com/influxdb/influxdb/issues/431). Don't log to standard output if a log file is specified in the config file
- [Issue #483](https://github.com/influxdb/influxdb/issues/483). Return 409 if a database already exists (Thanks, Edward Muller)
- [Issue #486](https://github.com/influxdb/influxdb/issues/486). Columns used in the target of continuous query shouldn't be inserted in the time series
- [Issue #490](https://github.com/influxdb/influxdb/issues/490). Database user passwords cannot be changed (Thanks, Edward Muller)
- [Issue #495](https://github.com/influxdb/influxdb/issues/495). Enforce write permissions properly

## v0.5.12 [2014-04-29]

### Bugfixes

- [Issue #419](https://github.com/influxdb/influxdb/issues/419), [Issue #478](https://github.com/influxdb/influxdb/issues/478). Allow hostname, raft and protobuf ports to be changed without requiring manual intervention from the user

## v0.5.11 [2014-04-25]

### Features

- [Issue #471](https://github.com/influxdb/influxdb/issues/471). Read and write permissions should be settable through the http api

### Bugfixes

- [Issue #323](https://github.com/influxdb/influxdb/issues/323). Continuous queries should guard against data loops
- [Issue #473](https://github.com/influxdb/influxdb/issues/473). Engine memory optimization

## v0.5.10 [2014-04-22]

### Features

- [Issue #463](https://github.com/influxdb/influxdb/issues/463). Allow series names to use any character (escape by wrapping in double quotes; example below)
- [Issue #447](https://github.com/influxdb/influxdb/issues/447). Allow @ in usernames
- [Issue #466](https://github.com/influxdb/influxdb/issues/466). Allow column names to use any character (escape by wrapping in double quotes)

### Bugfixes

- [Issue #458](https://github.com/influxdb/influxdb/issues/458). Continuous queries with group by time() and a column should insert sequence numbers of 1
- [Issue #457](https://github.com/influxdb/influxdb/issues/457). Deleting series that start with capital letters should work
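
Issues [#463] and [#466] above make double quotes the escape for arbitrary series and column names. An illustrative sketch in the lower-case query style of this era:

```sql
select "used %" from "disk usage /var"
```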

## v0.5.9 [2014-04-18]

### Bugfixes

- [Issue #446](https://github.com/influxdb/influxdb/issues/446). Check for (de)serialization errors
- [Issue #456](https://github.com/influxdb/influxdb/issues/456). Continuous queries failed if one of the group by columns had a null value
- [Issue #455](https://github.com/influxdb/influxdb/issues/455). Comparison operators should ignore null values

## v0.5.8 [2014-04-17]

- Renamed config.toml.sample to config.sample.toml

### Bugfixes

- [Issue #244](https://github.com/influxdb/influxdb/issues/244). Reconstruct the query from the ast
- [Issue #449](https://github.com/influxdb/influxdb/issues/449). Heartbeat timeouts can cause reading from connection to lock up
- [Issue #451](https://github.com/influxdb/influxdb/issues/451). Reduce the aggregation state that is kept in memory so that aggregation queries over large periods of time don't take an insane amount of memory

## v0.5.7 [2014-04-15]

### Features

- Queries are now logged as INFO in the log file before they run

### Bugfixes

- [Issue #328](https://github.com/influxdb/influxdb/issues/328). Join queries with math expressions don't work
- [Issue #440](https://github.com/influxdb/influxdb/issues/440). Heartbeat timeouts in logs
- [Issue #442](https://github.com/influxdb/influxdb/issues/442). shouldQuerySequentially didn't work as expected, causing count(*) queries on large time series to use lots of memory
- [Issue #437](https://github.com/influxdb/influxdb/issues/437). Queries with negative constants don't parse properly
- [Issue #432](https://github.com/influxdb/influxdb/issues/432). Deleted data using a delete query is resurrected after a server restart
- [Issue #439](https://github.com/influxdb/influxdb/issues/439). Report the right location of the error in the query
- Fix some bugs with the WAL recovery on startup

## v0.5.6 [2014-04-08]

### Features

- [Issue #310](https://github.com/influxdb/influxdb/issues/310). Request should support multiple timeseries
- [Issue #416](https://github.com/influxdb/influxdb/issues/416). Improve the time it takes to drop database

### Bugfixes

- [Issue #413](https://github.com/influxdb/influxdb/issues/413). Don't assume that group by interval is greater than a second
- [Issue #415](https://github.com/influxdb/influxdb/issues/415). Include the database when sending an auth error back to the user
- [Issue #421](https://github.com/influxdb/influxdb/issues/421). Make read timeout a config option
- [Issue #392](https://github.com/influxdb/influxdb/issues/392). Different columns in different shards returns invalid results when a query spans those shards

## v0.5.5 [2014-04-04]

- Upgrade leveldb 1.10 -> 1.15

  This should be a backward compatible change, but is here for documentation only

### Features

- Add a command line option to repair corrupted leveldb databases on startup
- [Issue #401](https://github.com/influxdb/influxdb/issues/401). No limit on the number of columns in the group by clause

### Bugfixes

- [Issue #398](https://github.com/influxdb/influxdb/issues/398). Support now() and NOW() in the query lang (example below)
- [Issue #403](https://github.com/influxdb/influxdb/issues/403). Filtering should work with join queries
- [Issue #404](https://github.com/influxdb/influxdb/issues/404). Filtering with invalid condition shouldn't crash the server
- [Issue #405](https://github.com/influxdb/influxdb/issues/405). Percentile shouldn't crash for small number of values
- [Issue #408](https://github.com/influxdb/influxdb/issues/408). Make InfluxDB recover from internal bugs and panics
- [Issue #390](https://github.com/influxdb/influxdb/issues/390). Multiple response.WriteHeader when querying as admin
- [Issue #407](https://github.com/influxdb/influxdb/issues/407). Start processing continuous queries only after the WAL is initialized
- Close leveldb databases properly if we couldn't create a new Shard. See leveldb\_shard\_datastore\_test:131
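
The `now()` support from [#398] above allows relative time ranges; an illustrative sketch (per that issue, `NOW()` is accepted as well):

```sql
select * from events where time > now() - 1h
```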

## v0.5.4 [2014-04-02]

### Bugfixes

- [Issue #386](https://github.com/influxdb/influxdb/issues/386). Drop series should work with series containing dots
- [Issue #389](https://github.com/influxdb/influxdb/issues/389). Filtering shouldn't stop prematurely
- [Issue #341](https://github.com/influxdb/influxdb/issues/341). Make the number of shards that are queried in parallel configurable
- [Issue #394](https://github.com/influxdb/influxdb/issues/394). Support count(distinct) and count(DISTINCT)
- [Issue #362](https://github.com/influxdb/influxdb/issues/362). Limit should be enforced after aggregation

## v0.5.3 [2014-03-31]

### Bugfixes

- [Issue #378](https://github.com/influxdb/influxdb/issues/378). Indexing should return if there are no requests added since the last index
- [Issue #370](https://github.com/influxdb/influxdb/issues/370). Filtering and limit should be enforced on the shards
- [Issue #379](https://github.com/influxdb/influxdb/issues/379). Boolean columns should be usable in where clauses
- [Issue #381](https://github.com/influxdb/influxdb/issues/381). Should be able to do deletes as a cluster admin

## v0.5.2 [2014-03-28]

### Bugfixes

- [Issue #342](https://github.com/influxdb/influxdb/issues/342). Data resurrected after a server restart
- [Issue #367](https://github.com/influxdb/influxdb/issues/367). Influxdb won't start if the api port is commented out
- [Issue #355](https://github.com/influxdb/influxdb/issues/355). Return an error on wrong time strings
- [Issue #331](https://github.com/influxdb/influxdb/issues/331). Allow negative time values in the where clause
- [Issue #371](https://github.com/influxdb/influxdb/issues/371). Series index isn't deleted when the series is dropped
- [Issue #360](https://github.com/influxdb/influxdb/issues/360). Store and recover continuous queries

## v0.5.1 [2014-03-24]

### Bugfixes

- Revert the version of goraft due to a bug found in the latest version

## v0.5.0 [2014-03-24]

### Features

- [Issue #293](https://github.com/influxdb/influxdb/pull/293). Implement a Graphite listener

### Bugfixes

- [Issue #340](https://github.com/influxdb/influxdb/issues/340). Writing many requests while replaying seems to cause commits out of order

## v0.5.0-rc.6 [2014-03-20]

### Bugfixes

- Increase raft election timeout to avoid unnecessary re-elections
- Sort points before writing them to avoid an explosion in the request number when the points are written randomly
- [Issue #335](https://github.com/influxdb/influxdb/issues/335). Fixes regexp for interpolating more than one column value in continuous queries
- [Issue #318](https://github.com/influxdb/influxdb/pull/318). Support EXPLAIN queries
- [Issue #333](https://github.com/influxdb/influxdb/pull/333). Fail when the password is too short or too long instead of passing it to the crypto library

## v0.5.0-rc.5 [2014-03-11]

### Bugfixes

- [Issue #312](https://github.com/influxdb/influxdb/issues/312). WAL should wait for server id to be set before recovering
- [Issue #301](https://github.com/influxdb/influxdb/issues/301). Use ref counting to guard against race conditions in the shard cache
Use ref counting to guard against race conditions in the shard cache -- [Issue #319](https://github.com/influxdb/influxdb/issues/319). Propagate engine creation errors correctly to the user -- [Issue #316](https://github.com/influxdb/influxdb/issues/316). Make - sure we don't starve goroutines if we get an access denied error - from one of the shards -- [Issue #306](https://github.com/influxdb/influxdb/issues/306). Deleting/dropping a database takes a lot of memory -- [Issue #302](https://github.com/influxdb/influxdb/issues/302). Should be able to set negative timestamps on points -- [Issue #327](https://github.com/influxdb/influxdb/issues/327). Make delete queries not use the WAL. This addresses #315, #317 and #314 -- [Issue #321](https://github.com/influxdb/influxdb/issues/321). Make sure we split points on shards properly - -## v0.5.0-rc.4 [2014-03-07] - -### Bugfixes - -- [Issue #298](https://github.com/influxdb/influxdb/issues/298). Fix limit when querying multiple shards -- [Issue #305](https://github.com/influxdb/influxdb/issues/305). Shard ids not unique after restart -- [Issue #309](https://github.com/influxdb/influxdb/issues/309). Don't relog the requests on the remote server -- Fix a few bugs in the WAL and refactor the way it works (this requires purging the WAL from the previous rc) - -## v0.5.0-rc.3 [2014-03-03] - -### Bugfixes - -- [Issue #69](https://github.com/influxdb/influxdb/issues/69). Support column aliases -- [Issue #287](https://github.com/influxdb/influxdb/issues/287). Make the LRU cache size configurable -- [Issue #38](https://github.com/influxdb/influxdb/issues/38). Fix a memory leak discussed in this story -- [Issue #286](https://github.com/influxdb/influxdb/issues/286). Make the number of open shards configurable -- Make LevelDB use the max open files configuration option. - -## v0.5.0-rc.2 [2014-02-27] - -### Bugfixes - -- [Issue #274](https://github.com/influxdb/influxdb/issues/274). Crash after restart -- [Issue #277](https://github.com/influxdb/influxdb/issues/277). Ensure duplicate shards won't be created -- [Issue #279](https://github.com/influxdb/influxdb/issues/279). Limits not working on regex queries -- [Issue #281](https://github.com/influxdb/influxdb/issues/281). `./influxdb -v` should print the sha when building from source -- [Issue #283](https://github.com/influxdb/influxdb/issues/283). Dropping a shard and restarting in a cluster causes a panic -- [Issue #288](https://github.com/influxdb/influxdb/issues/288). Sequence numbers should be unique per server id - -## v0.5.0-rc.1 [2014-02-25] - -### Bugfixes - -- Ensure large deletes don't take too much memory -- [Issue #240](https://github.com/influxdb/influxdb/pull/240). Unable to query against columns with `.` in the name. -- [Issue #250](https://github.com/influxdb/influxdb/pull/250). Different result between a normal and a continuous query with a "group by" clause -- [Issue #216](https://github.com/influxdb/influxdb/pull/216). Results with no points should exclude columns and points - -### Features - -- [Issue #243](https://github.com/influxdb/influxdb/issues/243). Should have an endpoint to GET a user's attributes.
-- [Issue #269](https://github.com/influxdb/influxdb/pull/269), [Issue #65](https://github.com/influxdb/influxdb/issues/65) New clustering architecture (see docs), with the side effect that queries can be distributed between multiple shards -- [Issue #164](https://github.com/influxdb/influxdb/pull/269), [Issue #103](https://github.com/influxdb/influxdb/pull/269), [Issue #166](https://github.com/influxdb/influxdb/pull/269), [Issue #165](https://github.com/influxdb/influxdb/pull/269), [Issue #132](https://github.com/influxdb/influxdb/pull/269) Make the request log a plain log file instead of leveldb, with recovery on startup - -### Deprecated - -- [Issue #189](https://github.com/influxdb/influxdb/issues/189). `/cluster_admins` and `/db/:db/users` return usernames in a `name` key instead of a `username` key. -- [Issue #216](https://github.com/influxdb/influxdb/pull/216). Results with no points should exclude columns and points - -## v0.4.4 [2014-02-05] - -### Features - -- Make the leveldb max open files configurable in the toml file - -## v0.4.3 [2014-01-31] - -### Bugfixes - -- [Issue #225](https://github.com/influxdb/influxdb/issues/225). Remove a hard limit on the points returned by the datastore -- [Issue #223](https://github.com/influxdb/influxdb/issues/223). Null values caused count(distinct()) to panic -- [Issue #224](https://github.com/influxdb/influxdb/issues/224). Null values broke replication due to a protobuf limitation - -## v0.4.1 [2014-01-30] - -### Features - -- [Issue #193](https://github.com/influxdb/influxdb/issues/193). Allow logging to stdout. Thanks @schmurfy -- [Issue #190](https://github.com/influxdb/influxdb/pull/190). Add support for SSL. -- [Issue #194](https://github.com/influxdb/influxdb/pull/194). Should be able to disable the Admin interface. - -### Bugfixes - -- [Issue #33](https://github.com/influxdb/influxdb/issues/33). Don't call WriteHeader more than once per request -- [Issue #195](https://github.com/influxdb/influxdb/issues/195). Allow the bind address to be configurable. Thanks @schmurfy. -- [Issue #199](https://github.com/influxdb/influxdb/issues/199). Make the test timeout configurable -- [Issue #200](https://github.com/influxdb/influxdb/issues/200). Selecting `time` or `sequence_number` silently fails -- [Issue #215](https://github.com/influxdb/influxdb/pull/215). Server fails to start up after Raft log compaction and restart. - -## v0.4.0 [2014-01-17] - -### Features - -- [Issue #86](https://github.com/influxdb/influxdb/issues/86). Support arithmetic expressions in the select clause -- [Issue #92](https://github.com/influxdb/influxdb/issues/92). Change '==' to '=' and '!=' to '<>' -- [Issue #88](https://github.com/influxdb/influxdb/issues/88). Support datetime strings -- [Issue #64](https://github.com/influxdb/influxdb/issues/64). Shard writes and queries across the cluster, with replay for briefly downed nodes (< 24 hrs) -- [Issue #78](https://github.com/influxdb/influxdb/issues/78). Sequence numbers persist across restarts so they're not reused -- [Issue #102](https://github.com/influxdb/influxdb/issues/102). Support expressions in the where condition -- [Issue #101](https://github.com/influxdb/influxdb/issues/101). Support expressions in aggregates -- [Issue #62](https://github.com/influxdb/influxdb/issues/62). Support updating and deleting column values -- [Issue #96](https://github.com/influxdb/influxdb/issues/96). Replicate deletes in a cluster -- [Issue #94](https://github.com/influxdb/influxdb/issues/94).
Delete queries -- [Issue #116](https://github.com/influxdb/influxdb/issues/116). Use proper logging -- [Issue #40](https://github.com/influxdb/influxdb/issues/40). Use TOML instead of JSON in the config file -- [Issue #99](https://github.com/influxdb/influxdb/issues/99). Support list series in the query language -- [Issue #149](https://github.com/influxdb/influxdb/issues/149). Cluster admins should be able to perform reads and writes. -- [Issue #108](https://github.com/influxdb/influxdb/issues/108). Querying one point using `time =` -- [Issue #114](https://github.com/influxdb/influxdb/issues/114). Servers should periodically check that they're consistent. -- [Issue #93](https://github.com/influxdb/influxdb/issues/93). Should be able to drop a time series -- [Issue #177](https://github.com/influxdb/influxdb/issues/177). Support drop series in the query language. -- [Issue #184](https://github.com/influxdb/influxdb/issues/184). Implement Raft log compaction. -- [Issue #153](https://github.com/influxdb/influxdb/issues/153). Implement continuous queries - -### Bugfixes - -- [Issue #90](https://github.com/influxdb/influxdb/issues/90). Group by multiple columns panics -- [Issue #89](https://github.com/influxdb/influxdb/issues/89). 'Group by' combined with 'where' not working -- [Issue #106](https://github.com/influxdb/influxdb/issues/106). Don't panic if we only see one point and can't calculate a derivative -- [Issue #105](https://github.com/influxdb/influxdb/issues/105). Panic when using a where clause that references columns with null values -- [Issue #61](https://github.com/influxdb/influxdb/issues/61). Remove default limits from queries -- [Issue #118](https://github.com/influxdb/influxdb/issues/118). Make column names starting with '_' legal -- [Issue #121](https://github.com/influxdb/influxdb/issues/121). Don't fall back to the cluster admin auth if the db user auth fails -- [Issue #127](https://github.com/influxdb/influxdb/issues/127). Return an error on delete queries with a where condition that doesn't have time -- [Issue #117](https://github.com/influxdb/influxdb/issues/117). Fill empty groups with default values -- [Issue #150](https://github.com/influxdb/influxdb/pull/150). Fix the parser for when multiple divisions look like a regex. -- [Issue #158](https://github.com/influxdb/influxdb/issues/158). Logged deletes should be stored with the time range if missing. -- [Issue #136](https://github.com/influxdb/influxdb/issues/136). Make sure writes are replicated in order to avoid triggering replays -- [Issue #145](https://github.com/influxdb/influxdb/issues/145). Server fails to join the cluster if all nodes start at the same time. -- [Issue #176](https://github.com/influxdb/influxdb/issues/176). Drop database should take effect on all nodes -- [Issue #180](https://github.com/influxdb/influxdb/issues/180). Column names not returned when running a multi-node cluster and writing more than one point. -- [Issue #182](https://github.com/influxdb/influxdb/issues/182). Queries with an invalid limit clause crash the server - -### Deprecated - -- Deprecate '==' and '!=' in favor of '=' and '<>', respectively -- Deprecate `/dbs` (for listing databases) in favor of a more consistent `/db` endpoint -- Deprecate the `username` field for a more consistent `name` field in `/db/:db/users` and `/cluster_admins` -- Deprecate the endpoints `/db/:db/admins/:user` in favor of using `/db/:db/users/:user`, which should - be used to update user flags, password, etc. -- Querying for column names that don't exist no longer throws an error.
- -## v0.3.2 - -### Features - -- [Issue #82](https://github.com/influxdb/influxdb/issues/82). Add an endpoint for listing available admin interfaces. -- [Issue #80](https://github.com/influxdb/influxdb/issues/80). Support durations when specifying start and end time -- [Issue #81](https://github.com/influxdb/influxdb/issues/81). Add support for IN - -### Bugfixes - -- [Issue #75](https://github.com/influxdb/influxdb/issues/75). Don't allow time series names that start with an underscore -- [Issue #85](https://github.com/influxdb/influxdb/issues/85). Non-existing columns exist after they have been queried before - -## v0.3.0 - -### Features - -- [Issue #51](https://github.com/influxdb/influxdb/issues/51). Implement first and last aggregates -- [Issue #35](https://github.com/influxdb/influxdb/issues/35). Support table aliases in Join Queries -- [Issue #71](https://github.com/influxdb/influxdb/issues/71). Add WillReturnSingleSeries to the Query -- [Issue #61](https://github.com/influxdb/influxdb/issues/61). Limit should default to 10k -- [Issue #59](https://github.com/influxdb/influxdb/issues/59). Add a histogram aggregate function - -### Bugfixes - -- Fix joins and merges when the query is a descending order query -- [Issue #57](https://github.com/influxdb/influxdb/issues/57). Don't panic when the type of time != float -- [Issue #63](https://github.com/influxdb/influxdb/issues/63). Aggregate queries should not have a sequence_number column - -## v0.2.0 - -### Features - -- [Issue #37](https://github.com/influxdb/influxdb/issues/37). Support the negation of the regex matcher !~ -- [Issue #47](https://github.com/influxdb/influxdb/issues/47). Spill out query and database details at the time of a bug report - -### Bugfixes - -- [Issue #36](https://github.com/influxdb/influxdb/issues/36). The regex operator should be =~ not ~= -- [Issue #39](https://github.com/influxdb/influxdb/issues/39). Return proper content types from the http api -- [Issue #42](https://github.com/influxdb/influxdb/issues/42). Make the api consistent with the docs -- [Issue #41](https://github.com/influxdb/influxdb/issues/41). Table/Points not deleted when a database is dropped -- [Issue #45](https://github.com/influxdb/influxdb/issues/45). Aggregation shouldn't mess up the order of the points -- [Issue #44](https://github.com/influxdb/influxdb/issues/44). Fix crashes on RHEL 5.9 -- [Issue #34](https://github.com/influxdb/influxdb/issues/34). Ascending order always returns null for columns that have a null value -- [Issue #55](https://github.com/influxdb/influxdb/issues/55). Limit should limit the points that match the Where clause -- [Issue #53](https://github.com/influxdb/influxdb/issues/53). Writing null values via the HTTP API fails - -### Deprecated - -- Preparing to deprecate `/dbs` (for listing databases) in favor of a more consistent `/db` endpoint -- Preparing to deprecate the `username` field for a more consistent `name` field in `/db/:db/users` -- Preparing to deprecate the endpoints `/db/:db/admins/:user` in favor of using `/db/:db/users/:user`, which should - be used to update user flags, password, etc. - -## v0.1.0 - -### Features - -- [Issue #29](https://github.com/influxdb/influxdb/issues/29). Semicolon is now optional in queries -- [Issue #31](https://github.com/influxdb/influxdb/issues/31). Support Basic Auth as well as query params for authentication. - -### Bugfixes - -- Don't allow creating users with an empty username -- [Issue #22](https://github.com/influxdb/influxdb/issues/22).
Don't set goroot if it was already set -- [Issue #25](https://github.com/influxdb/influxdb/issues/25). Fix queries that use the median aggregator -- [Issue #26](https://github.com/influxdb/influxdb/issues/26). Default log and db directories should be in /opt/influxdb/shared/data -- [Issue #27](https://github.com/influxdb/influxdb/issues/27). Group by should not blow up if one of the columns in group by has null values -- [Issue #30](https://github.com/influxdb/influxdb/issues/30). Column indexes/names getting off somehow -- [Issue #32](https://github.com/influxdb/influxdb/issues/32). Fix many typos in the codebase. Thanks @pborreli - -## v0.0.9 - -#### Features - -- Add stddev(...) support -- Better docs, thanks @auxesis and @d-snp. - -#### Bugfixes - -- Set PYTHONPATH and CC appropriately on Mac OS X. -- [Issue #18](https://github.com/influxdb/influxdb/issues/18). Fix 386 debian and redhat packages -- [Issue #23](https://github.com/influxdb/influxdb/issues/23). Fix the init scripts on redhat - -## v0.0.8 - -#### Features - -- Add a way to reset the root password from the command line. -- Add distinct(..) and derivative(...) support -- Print test coverage if running go1.2 - -#### Bugfixes - -- Fix the default admin site path in the .deb and .rpm packages. -- Fix the configuration filename in the .tar.gz package. - -## v0.0.7 - -#### Features - -- Include the admin site in the repo to make it easier for newcomers. - -## v0.0.6 - -#### Features - -- Add count(distinct(..)) support - -#### Bugfixes - -- Reuse levigo read/write options. - -## v0.0.5 - -#### Features - -- Cache passwords in memory to speed up password verification -- Add MERGE and INNER JOIN support - -#### Bugfixes - -- All columns should be returned if `select *` was used -- Read/Write benchmarks - -## v0.0.2 - -#### Features - -- Add an admin UI -- Deb and RPM packages - -#### Bugfixes - -- Fix some nil pointer dereferences -- Clean up the aggregators implementation - -## v0.0.1 [2013-10-22] - - * Initial Release diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/CONTRIBUTING.md deleted file mode 100644 index d578f1fea..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/CONTRIBUTING.md +++ /dev/null @@ -1,250 +0,0 @@ -Contributing to InfluxDB -======================== - -Bug reports ---------------- -Before you file an issue, please search existing issues in case it has already been filed, or perhaps even fixed. If you file an issue, please include the following. -* Full details of your operating system (or distribution), e.g. 64-bit Ubuntu 14.04. -* The version of InfluxDB you are running. -* Whether you installed it using a pre-built package, or built it from source. -* A small test case, if applicable, that demonstrates the issue. - -Remember the golden rule of bug reports: **The easier you make it for us to reproduce the problem, the faster it will get fixed.** -If you have never written a bug report before, or if you want to brush up on your bug reporting skills, we recommend reading [Simon Tatham's essay "How to Report Bugs Effectively."](http://www.chiark.greenend.org.uk/~sgtatham/bugs.html) - -Test cases should be in the form of `curl` commands.
For example: -```bash -# create database -curl -G http://localhost:8086/query --data-urlencode "q=CREATE DATABASE mydb" - -# create retention policy -curl -G http://localhost:8086/query --data-urlencode "q=CREATE RETENTION POLICY myrp ON mydb DURATION 365d REPLICATION 1 DEFAULT" - -# write data -curl -X POST http://localhost:8086/write --data-urlencode "db=mydb" --data-binary "cpu,region=useast,host=server_1,service=redis value=61" - -# Delete a Measurement -curl -G http://localhost:8086/query --data-urlencode 'db=mydb' --data-urlencode 'q=DROP MEASUREMENT cpu' - -# Query the Measurement -# Bug: expected it to return no data, but data comes back. -curl -G http://localhost:8086/query --data-urlencode 'db=mydb' --data-urlencode 'q=SELECT * from cpu' -``` -**If you don't include a clear test case like this, your issue may not be investigated, and may even be closed**. If writing the data is too difficult, please zip up your data directory and include a link to it in your bug report. - -Please note that issues are *not the place to file general questions* such as "how do I use collectd with InfluxDB?" Questions of this nature should be sent to the [Google Group](https://groups.google.com/forum/#!forum/influxdb), not filed as issues. Issues like this will be closed. - -Feature requests ---------------- -We really like to receive feature requests, as they help us prioritize our work. Please be clear about your requirements, as incomplete feature requests may simply be closed if we don't understand what you would like to see added to InfluxDB. - -Contributing to the source code ---------------- - -InfluxDB follows the standard Go project structure. This means that all your Go development is done in `$GOPATH/src`. GOPATH can be any directory under which InfluxDB and all its dependencies will be cloned. For more details on the recommended Go project structure, see [How to Write Go Code](http://golang.org/doc/code.html) and [Go: Best Practices for Production Environments](http://peter.bourgon.org/go-in-production/), or you can just follow the steps below. - -Submitting a pull request ------------- -To submit a pull request you should fork the InfluxDB repository, and make your change on a feature branch of your fork. Then generate a pull request from your branch against *master* of the InfluxDB repository. Include in your pull request details of your change -- the why *and* the how -- as well as the testing you performed. Also, be sure to run the test suite with your change in place. Changes that cause tests to fail cannot be merged. - -There will usually be some back and forth as we finalize the change, but once that completes it may be merged. - -To assist in review for the PR, please add the following to your pull request comment: - -```md -- [ ] CHANGELOG.md updated -- [ ] Rebased/mergable -- [ ] Tests pass -- [ ] Sign [CLA](http://influxdb.com/community/cla.html) (if not already signed) -``` - -Signing the CLA ---------------- - -If you are going to be contributing back to InfluxDB, please take a second to sign our CLA, which can be found [on our website](http://influxdb.com/community/cla.html). - -Installing Go -------------- -InfluxDB requires Go 1.4 or greater. - -At InfluxDB we find gvm, a Go version manager, useful for installing Go. For instructions on how to install it see [the gvm page on github](https://github.com/moovweb/gvm). - -After installing gvm you can install and set the default go version by running the following: - - gvm install go1.4.2 - gvm use go1.4.2 --default
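
If you want to confirm which toolchain a build will actually use, one quick check is a one-line program that prints the Go version it was compiled with. This snippet is purely illustrative and not part of the repository:

```go
package main

import (
	"fmt"
	"runtime"
)

// Prints the version of the Go toolchain this binary was built with,
// e.g. "go1.4.2". If it reports something older than go1.4, switch
// toolchains (for example, `gvm use go1.4.2 --default`) before building.
func main() {
	fmt.Println(runtime.Version())
}
```

Build and run it with the toolchain you just selected; anything older than `go1.4` means the `gvm use` above did not take effect in this shell.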
Revision Control Systems ------------- -Go has the ability to import remote packages via revision control systems with the `go get` command. To ensure that you can retrieve any remote package, be sure to install the following RCS software on your system. Currently the project only depends on `git` and `mercurial`. - -* [Install Git](http://git-scm.com/book/en/Getting-Started-Installing-Git) -* [Install Mercurial](http://mercurial.selenic.com/wiki/Download) - -Getting the source ------- -Set up the project structure and fetch the repo like so: - -```bash - mkdir $HOME/gocodez - export GOPATH=$HOME/gocodez - go get github.com/influxdb/influxdb -``` - -You can add the line `export GOPATH=$HOME/gocodez` to your bash/zsh rc file so it is set for every shell, instead of having to run it manually every time. - -Cloning a fork -------------- -If you wish to work with a fork of InfluxDB (your own fork, for example), you must still follow the directory structure above; instead of cloning the main repo, clone your fork. Follow the steps below to work with a fork: - -```bash - export GOPATH=$HOME/gocodez - mkdir -p $GOPATH/src/github.com/influxdb - cd $GOPATH/src/github.com/influxdb - git clone git@github.com:<username>/influxdb -``` - -Retaining the directory structure `$GOPATH/src/github.com/influxdb` is necessary so that Go imports work correctly. - -Pre-commit checks ------------- - -We have a pre-commit hook to make sure code is formatted properly and vetted before you commit any changes. We strongly recommend using the pre-commit hook to guard against accidentally committing unformatted code. To use the pre-commit hook, run the following: -```bash - cd $GOPATH/src/github.com/influxdb/influxdb - cp .hooks/pre-commit .git/hooks/ -``` -In case the commit is rejected because it's not formatted, you can run the following to format and vet the code: - -``` -go fmt ./... -go vet ./... -``` - -To install go vet, run the following command: -``` -go get golang.org/x/tools/cmd/vet -``` - -NOTE: If you have not installed mercurial, the above command will fail. See [Revision Control Systems](#revision-control-systems) above. - -For more information on `go vet`, [read the GoDoc](https://godoc.org/golang.org/x/tools/cmd/vet). - -Build and Test ----- - -Make sure you have Go installed and the project structure as shown above. To build the project, execute the following commands: -```bash -cd $GOPATH/src/github.com/influxdb -go get -u -f -t ./... -go build ./... -``` - -To install the binaries, run the following command. They can be found in `$GOPATH/bin`. Please note that the InfluxDB binary is named `influxd`, not `influxdb`. -```bash -go install ./... -``` - -To set the version and commit flags during the build, pass the following to the build command: -```bash --ldflags="-X main.version $VERSION -X main.branch $BRANCH -X main.commit $COMMIT -X main.buildTime $TIME" -``` - -where `$VERSION` is the version, `$BRANCH` is the branch, `$COMMIT` is the git commit hash, and `$TIME` is the build timestamp. - -If you want to build packages, see `package.sh` help: -```bash -package.sh -h -``` - -To run the tests, execute the following command: - -```bash -cd $GOPATH/src/github.com/influxdb/influxdb -go test -v ./... - -# run tests that match some pattern -go test -run=TestDatabase . -v - -# run tests and show coverage -go test -coverprofile /tmp/cover .
&& go tool cover -html /tmp/cover -``` - -To install go cover, run the following command: -``` -go get golang.org/x/tools/cmd/cover -``` - -Generated Google Protobuf code ----------------- -Most changes to the source do not require that the generated protocol buffer code be changed. But if you need to modify the protocol buffer code, you'll first need to install the protocol buffers toolchain. - -First install the [protocol buffer compiler](https://developers.google.com/protocol-buffers/) 2.6.1 or later for your OS. - -Then install the Go plugins: - -```bash -go get github.com/gogo/protobuf/proto -go get github.com/gogo/protobuf/protoc-gen-gogo -go get github.com/gogo/protobuf/gogoproto -``` - -Finally, run `go generate` after updating any `*.proto` file: - -```bash -go generate ./... -``` -**Troubleshooting** - -If generating the protobuf code is failing for you, check each of the following: - * Ensure the protobuf library can be found. Make sure that `LD_LIBRARY_PATH` includes the directory in which the library `libprotoc.so` has been installed. - * Ensure the command `protoc-gen-gogo`, found in `$GOPATH/bin`, is on your path. This can be done by adding `$GOPATH/bin` to `PATH`. - -Profiling ----- -When troubleshooting problems with CPU or memory, the Go toolchain can be helpful. You can start InfluxDB with CPU or memory profiling turned on. For example: - -```sh -# start influx with profiling -./influxd -cpuprofile influxd.prof -# run queries, writes, whatever you're testing -# Quit out of influxd and influxd.prof will then be written. -# open up pprof to examine the profiling data. -go tool pprof ./influxd influxd.prof -# once inside, run "web", which opens up a browser with the CPU graph -# can also run "web <function name>" to zoom in, or "list <function name>" to see specific lines -``` -Note that when you pass the binary to `go tool pprof` *you must specify the path to the binary*. - -Use of third-party packages ------------ -A third-party package is defined as one that is not part of the standard Go distribution. Generally speaking, we prefer to minimize our use of third-party packages, and avoid them unless absolutely necessary. We'll often write a little bit of code rather than pull in a third-party package. Of course, we do use some third-party packages -- most importantly we use [BoltDB](https://github.com/boltdb/bolt) as the storage engine. So to maximise the chance your change will be accepted by us, use only the standard libraries, or the third-party packages we have decided to use. - -For rationale, check out the post [The Case Against Third Party Libraries](http://blog.gopheracademy.com/advent-2014/case-against-3pl/). - -Continuous Integration testing ----- -InfluxDB uses CircleCI for continuous integration testing. To see how the code is built and tested, check out [this file](https://github.com/influxdb/influxdb/blob/master/circle-test.sh). It closely follows the build and test process outlined above. You can see the exact version of Go InfluxDB uses for testing by consulting that file. - -Useful links ------------ -- [Useful techniques in Go](http://arslan.io/ten-useful-techniques-in-go) -- [Go in production](http://peter.bourgon.org/go-in-production/) -- [Principles of designing Go APIs with channels](https://inconshreveable.com/07-08-2014/principles-of-designing-go-apis-with-channels/) -- [Common mistakes in Golang](http://soryy.com/blog/2014/common-mistakes-with-go-lang/). Especially the section `Loops, Closures, and Local Variables`; a short illustration of that pitfall follows below.
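
Because that pitfall comes up constantly in review, here is a minimal, self-contained sketch of it (illustrative only; not code from this repository):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	// Buggy: the closure captures the loop variable itself. Toolchains of
	// this era (pre-Go 1.22) share one variable across iterations, so by
	// the time the goroutines run, most of them are likely to observe the
	// final value.
	for _, name := range []string{"cpu", "mem", "disk"} {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("buggy:", name) // frequently prints "disk" three times
		}()
	}
	wg.Wait()

	// Fixed: pass the value as an argument (or re-declare it inside the
	// loop body) so each goroutine gets its own copy.
	for _, name := range []string{"cpu", "mem", "disk"} {
		wg.Add(1)
		go func(n string) {
			defer wg.Done()
			fmt.Println("fixed:", n)
		}(name)
	}
	wg.Wait()
}
```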
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/DOCKER.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/DOCKER.md deleted file mode 100644 index e78187d9b..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/DOCKER.md +++ /dev/null @@ -1,44 +0,0 @@ -# Docker Setup - -This document describes how to build and run a minimal InfluxDB container under Docker. Currently, it has only been tested for local development and assumes that you have a working Docker environment. - -## Building the Image - -To build a Docker image for InfluxDB from your current checkout, run the following: - -``` -$ ./build-docker.sh -``` - -This script uses the `golang:1.5` image to build a fully static binary of `influxd` and then adds it to a minimal `scratch` image. - -To build the image using a different version of go: - -``` -$ GO_VER=1.4.2 ./build-docker.sh -``` - -Available versions can be found [here](https://hub.docker.com/_/golang/). - -## Single Node Container - -This will start an interactive, single-node container that publishes the container's ports `8086` and `8088` to the host's ports `8086` and `8088`, respectively. This is identical to starting `influxd` manually. - -``` -$ docker run -it -p 8086:8086 -p 8088:8088 influxdb -``` - -## Multi-Node Cluster - -This will create a simple 3-node cluster. The data is stored within the containers and will be lost when the containers are removed. This is only useful for test clusters. - -The `HOST_IP` env variable should be your host IP if running under Linux, or the VirtualBox VM IP if running under OS X. On OS X, this would be something like `$(docker-machine ip dev)` or `$(boot2docker ip)`, depending on which Docker tool you are using.
- -``` -$ export HOST_IP= -$ docker run -it -p 8086:8086 -p 8088:8088 influxdb -hostname $HOST_IP:8088 -$ docker run -it -p 8186:8086 -p 8188:8088 influxdb -hostname $HOST_IP:8188 -join $HOST_IP:8088 -$ docker run -it -p 8286:8086 -p 8288:8088 influxdb -hostname $HOST_IP:8288 -join $HOST_IP:8088 -``` - diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/Dockerfile b/Godeps/_workspace/src/github.com/influxdb/influxdb/Dockerfile deleted file mode 100644 index d30cd300d..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/Dockerfile +++ /dev/null @@ -1,24 +0,0 @@ -FROM busybox:ubuntu-14.04 - -MAINTAINER Jason Wilder "" - -# admin, http, udp, cluster, graphite, opentsdb, collectd -EXPOSE 8083 8086 8086/udp 8088 2003 4242 25826 - -WORKDIR /app - -# copy binary into image -COPY influxd /app/ - -# Add influxd to the PATH -ENV PATH=/app:$PATH - -# Generate a default config -RUN influxd config > /etc/influxdb.toml - -# Use /data for all disk storage -RUN sed -i 's/dir = "\/.*influxdb/dir = "\/data/' /etc/influxdb.toml - -VOLUME ["/data"] - -ENTRYPOINT ["influxd", "--config", "/etc/influxdb.toml"] diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/Dockerfile_test_ubuntu32 b/Godeps/_workspace/src/github.com/influxdb/influxdb/Dockerfile_test_ubuntu32 deleted file mode 100644 index d67a91a8d..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/Dockerfile_test_ubuntu32 +++ /dev/null @@ -1,12 +0,0 @@ -FROM 32bit/ubuntu:14.04 - -RUN apt-get update && apt-get install -y python-software-properties software-properties-common git -RUN add-apt-repository ppa:evarlast/golang1.4 -RUN apt-get update && apt-get install -y -o Dpkg::Options::="--force-overwrite" golang-go - -ENV GOPATH=/root/go -RUN mkdir -p /root/go/src/github.com/influxdb/influxdb -RUN mkdir -p /tmp/artifacts - -VOLUME /root/go/src/github.com/influxdb/influxdb -VOLUME /tmp/artifacts diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE b/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE deleted file mode 100644 index d50222706..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013-2015 Errplane Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE_OF_DEPENDENCIES.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE_OF_DEPENDENCIES.md deleted file mode 100644 index 7aae45f9d..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE_OF_DEPENDENCIES.md +++ /dev/null @@ -1,19 +0,0 @@ -# List -- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE) -- gopkg.in/fatih/pool.v2 [MIT LICENSE](https://github.com/fatih/pool/blob/v2.0.0/LICENSE) -- github.com/BurntSushi/toml [WTFPL LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING) -- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING) -- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE) -- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE) -- github.com/rakyll/statik/fs [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE) -- github.com/kimor79/gollectd [BSD LICENSE](https://github.com/kimor79/gollectd/blob/master/LICENSE) -- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license) -- react 0.13.3 [BSD LICENSE](https://github.com/facebook/react/blob/master/LICENSE) -- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE) -- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt) -- glyphicons [LICENSE](http://glyphicons.com/license/) -- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE) -- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE) -- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE) -- golang.org/x/crypto/* [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE) - diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/Makefile b/Godeps/_workspace/src/github.com/influxdb/influxdb/Makefile deleted file mode 100644 index 66863bf98..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/Makefile +++ /dev/null @@ -1,38 +0,0 @@ -PACKAGES=$(shell find . -name '*.go' -print0 | xargs -0 -n1 dirname | sort --unique) - -default: - -metalint: deadcode cyclo aligncheck defercheck structcheck lint errcheck - -deadcode: - @deadcode $(PACKAGES) 2>&1 - -cyclo: - @gocyclo -over 10 $(PACKAGES) - -aligncheck: - @aligncheck $(PACKAGES) - -defercheck: - @defercheck $(PACKAGES) - - -structcheck: - @structcheck $(PACKAGES) - -lint: - @for pkg in $(PACKAGES); do golint $$pkg; done - -errcheck: - @for pkg in $(PACKAGES); do \ - errcheck -ignorepkg=bytes,fmt -ignore=":(Rollback|Close)" $$pkg; \ - done - -tools: - go get github.com/remyoudompheng/go-misc/deadcode - go get github.com/alecthomas/gocyclo - go get github.com/opennota/check/... - go get github.com/golang/lint/golint - go get github.com/kisielk/errcheck - -.PHONY: default metalint deadcode cyclo aligncheck defercheck structcheck lint errcheck tools \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/QUERIES.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/QUERIES.md deleted file mode 100644 index 46a9eb1da..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/QUERIES.md +++ /dev/null @@ -1,180 +0,0 @@ -The top level name is called a measurement. These names can contain any characters. Then there are field names, field values, tag keys and tag values, which can also contain any characters. However, if the measurement, field, or tag contains any character other than [A-Z,a-z,0-9,_], or if it starts with a digit, it must be double-quoted. Therefore, anywhere a measurement name, field key, or tag key appears, it should be wrapped in double quotes.
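
To make that quoting rule concrete, here is a small Go sketch of the check it implies (a hypothetical illustration, not the server's actual parser):

```go
package main

import "fmt"

// needsQuoting reports whether an identifier (measurement name, field key
// or tag key) must be double-quoted under the rule above: it starts with
// a digit, or contains a character outside [A-Za-z0-9_].
func needsQuoting(ident string) bool {
	if ident == "" {
		return true
	}
	if ident[0] >= '0' && ident[0] <= '9' {
		return true // starts with a digit
	}
	for _, r := range ident {
		switch {
		case r >= 'A' && r <= 'Z', r >= 'a' && r <= 'z', r >= '0' && r <= '9', r == '_':
			// allowed bare
		default:
			return true // e.g. "cpu load" or "cpu.load"
		}
	}
	return false
}

func main() {
	fmt.Println(needsQuoting("cpu_load")) // false
	fmt.Println(needsQuoting("cpu.load")) // true
	fmt.Println(needsQuoting("1cpu"))     // true
}
```

Anything for which `needsQuoting` returns true would be written as, e.g., `"cpu.load"` in a query.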
# Databases & retention policies - -```sql --- create a database -CREATE DATABASE <name> - --- create a retention policy -CREATE RETENTION POLICY <rp-name> ON <db-name> DURATION <duration> REPLICATION <n> [DEFAULT] - --- alter retention policy -ALTER RETENTION POLICY <rp-name> ON <db-name> (DURATION <duration> | REPLICATION <n> | DEFAULT)+ - --- drop a database -DROP DATABASE <name> - --- drop a retention policy -DROP RETENTION POLICY <rp-name> ON <db-name> -``` -where `<duration>` is either `INF` for infinite retention, or an integer followed by the desired unit of time: u,ms,s,m,h,d,w for microseconds, milliseconds, seconds, minutes, hours, days, or weeks, respectively. `<n>` must be an integer. - -If present, `DEFAULT` sets the retention policy as the default retention policy for writes and reads.
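
To make the `<duration>` grammar concrete, here is a small Go sketch of a parser for it. This is an illustration of the grammar as stated above, not InfluxDB's own implementation:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

// parseRetentionDuration interprets the <duration> grammar described above:
// "INF" for infinite retention, or an integer followed by one of
// u, ms, s, m, h, d, w.
func parseRetentionDuration(s string) (d time.Duration, infinite bool, err error) {
	if s == "INF" {
		return 0, true, nil // infinite retention
	}
	units := []struct {
		suffix string
		unit   time.Duration
	}{
		{"ms", time.Millisecond}, // must be tried before "m" and "s"
		{"u", time.Microsecond},
		{"s", time.Second},
		{"m", time.Minute},
		{"h", time.Hour},
		{"d", 24 * time.Hour},
		{"w", 7 * 24 * time.Hour},
	}
	for _, u := range units {
		if strings.HasSuffix(s, u.suffix) {
			n, convErr := strconv.Atoi(strings.TrimSuffix(s, u.suffix))
			if convErr != nil {
				return 0, false, fmt.Errorf("invalid duration %q", s)
			}
			return time.Duration(n) * u.unit, false, nil
		}
	}
	return 0, false, fmt.Errorf("invalid duration %q", s)
}

func main() {
	d, inf, err := parseRetentionDuration("365d")
	fmt.Println(d, inf, err) // 8760h0m0s false <nil>
}
```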
# Users and permissions - -```sql --- create user -CREATE USER <name> WITH PASSWORD '<password>' - --- grant privilege on a database -GRANT <privilege> ON <db> TO <user> - --- grant cluster admin privileges -GRANT ALL [PRIVILEGES] TO <user> - --- revoke privilege -REVOKE <privilege> ON <db> FROM <user> - --- revoke all privileges for a DB -REVOKE ALL [PRIVILEGES] ON <db> FROM <user> - --- revoke all privileges including cluster admin -REVOKE ALL [PRIVILEGES] FROM <user> - --- combine db creation with privilege assignment (user must already exist) -CREATE DATABASE <db> GRANT <privilege> TO <user> -CREATE DATABASE <db> REVOKE <privilege> FROM <user> - --- delete a user -DROP USER <name> - - -``` -where `<privilege> := READ | WRITE | ALL`. - -Authentication must be enabled in the influxdb.conf file for user permissions to be in effect. - -By default, newly created users have no privileges on any database. - -Cluster administration privileges automatically grant full read and write permissions to all databases, regardless of subsequent database-specific privilege revocation statements. - -# Select - -```sql -SELECT mean(value) from cpu WHERE host = 'serverA' AND time > now() - 4h GROUP BY time(5m) - -SELECT mean(value) from cpu WHERE time > now() - 4h GROUP BY time(5m), region -``` - -## Group By - -# Delete - -# Series - -## Destroy - -```sql -DROP MEASUREMENT <name> -DROP MEASUREMENT cpu WHERE region = 'uswest' -``` - -## Show - -Show series queries are for pulling out individual series from measurement names and tag data. They're useful for discovery. - -```sql --- show all databases -SHOW DATABASES - --- show measurement names -SHOW MEASUREMENTS -SHOW MEASUREMENTS LIMIT 15 -SHOW MEASUREMENTS LIMIT 10 OFFSET 40 -SHOW MEASUREMENTS WHERE service = 'redis' --- LIMIT and OFFSET can be applied to any of the SHOW type queries - --- show all series across all measurements/tagsets -SHOW SERIES - --- show all series for any measurements where tag key region = tag value 'uswest' -SHOW SERIES WHERE region = 'uswest' - -SHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10 - --- returns rows 100 - 109 of the result. In the case of SHOW SERIES, the result is --- split into measurements, and each series counts as a row. So you could see only a --- single measurement returned, but 10 series within it. -SHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10 OFFSET 100 - --- show all retention policies on a database -SHOW RETENTION POLICIES ON mydb - --- show all tag keys across all measurements -SHOW TAG KEYS - --- show all the tag keys for a given measurement -SHOW TAG KEYS FROM cpu -SHOW TAG KEYS FROM temperature, wind_speed - --- show all the tag values. note that a single WHERE TAG KEY = '...' clause is required -SHOW TAG VALUES WITH TAG KEY = 'region' -SHOW TAG VALUES FROM cpu WHERE region = 'uswest' WITH TAG KEY = 'host' - --- and you can do stuff against fields -SHOW FIELD KEYS FROM cpu - --- but you can't do this -SHOW FIELD VALUES --- we don't index field values, so this query should be invalid. - --- show all users -SHOW USERS -``` - -Note that `FROM` and `WHERE` are optional clauses in most of the show series queries. - -And the show series output looks like this: - -```json -[ - { - "name": "cpu", - "columns": ["id", "region", "host"], - "values": [ - 1, "uswest", "servera", - 2, "uswest", "serverb" - ] - }, - { - "name": "response_time", - "columns": ["id", "application", "host"], - "values": [ - 3, "myRailsApp", "servera" - ] - } -] -``` - -# Continuous Queries - -Continuous queries are going to be inspired by MySQL `TRIGGER` syntax: - -http://dev.mysql.com/doc/refman/5.0/en/trigger-syntax.html - -Instead of having automatically-assigned ids, named continuous queries allow for some level of duplication prevention, -particularly in the case where creation is scripted. - -## Create - - CREATE CONTINUOUS QUERY <name> AS SELECT ... FROM ... - -## Destroy - - DROP CONTINUOUS QUERY <name> - -## List - - SHOW CONTINUOUS QUERIES diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/README.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/README.md deleted file mode 100644 index 94b8547ab..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# InfluxDB [![Circle CI](https://circleci.com/gh/influxdb/influxdb/tree/master.svg?style=svg)](https://circleci.com/gh/influxdb/influxdb/tree/master) - -## An Open-Source, Distributed, Time Series Database - -> InfluxDB v0.9.0 is now out. Going forward, the 0.9.x series of releases will not make breaking API changes or breaking changes to the underlying data storage. However, 0.9.0 clustering should be considered an alpha release. - -InfluxDB is an open source **distributed time series database** with -**no external dependencies**. It's useful for recording metrics, -events, and performing analytics. - -## Features - -* Built-in [HTTP API](http://influxdb.com/docs/v0.9/concepts/reading_and_writing_data.html) so you don't have to write any server side code to get up and running. -* Data can be tagged, allowing very flexible querying. -* SQL-like query language. -* Clustering is supported out of the box, so that you can scale horizontally to handle your data. -* Simple to install and manage, and fast to get data in and out. -* It aims to answer queries in real-time. That means every data point is - indexed as it comes in and is immediately available in queries that - should return in < 100ms. - -## Getting Started -*The following directions apply only to the 0.9.0 release or building from the source on master.* - -### Building - -You don't need to build the project to use it - you can use any of our -[pre-built packages](http://influxdb.com/download/index.html) to install InfluxDB. That's -the recommended way to get it running.
However, if you want to contribute to the core of InfluxDB, you'll need to build. For those adventurous enough, you can -[follow along on our docs](http://github.com/influxdb/influxdb/blob/master/CONTRIBUTING.md). - -### Starting InfluxDB -* `service influxdb start` if you have installed InfluxDB using an official Debian or RPM package. -* `systemctl start influxdb` if you have installed InfluxDB using an official Debian or RPM package, and are running a distro with `systemd`. For example, Ubuntu 15 or later. -* `$GOPATH/bin/influxd` if you have built InfluxDB from source. - -### Creating your first database - -``` -curl -G 'http://localhost:8086/query' --data-urlencode "q=CREATE DATABASE mydb" -``` - -### Insert some data -``` -curl -XPOST 'http://localhost:8086/write?db=mydb' \ --d 'cpu,host=server01,region=uswest load=42 1434055562000000000' - -curl -XPOST 'http://localhost:8086/write?db=mydb' \ --d 'cpu,host=server02,region=uswest load=78 1434055562000000000' - -curl -XPOST 'http://localhost:8086/write?db=mydb' \ --d 'cpu,host=server03,region=useast load=15.4 1434055562000000000' -``` - -### Query for the data -```bash -curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=mydb" \ ---data-urlencode "q=SELECT * FROM cpu WHERE host='server01' AND time < now() - 1d" -``` - -### Analyze the data -```bash -curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=mydb" \ ---data-urlencode "q=SELECT mean(load) FROM cpu WHERE region='uswest'" -``` - -## Helpful Links - -* Understand the [design goals and motivations of the project](http://influxdb.com/docs/v0.9/introduction/overview.html). -* Follow the [getting started guide](http://influxdb.com/docs/v0.9/introduction/getting_started.html) to find out how to install InfluxDB, start writing more data, and issue more queries - in just a few minutes. -* See the [HTTP API documentation to start writing a library for your favorite language](http://influxdb.com/docs/v0.9/concepts/reading_and_writing_data.html). diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/build-docker.sh b/Godeps/_workspace/src/github.com/influxdb/influxdb/build-docker.sh deleted file mode 100644 index 0dea62d2a..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/build-docker.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh - -set -e -x - -GO_VER=${GO_VER:-1.5} - -docker run -it -v "${GOPATH}":/gopath -v "$(pwd)":/app -e "GOPATH=/gopath" -w /app golang:$GO_VER sh -c 'CGO_ENABLED=0 go build -a --installsuffix cgo --ldflags="-s" -o influxd ./cmd/influxd' - -docker build -t influxdb . diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/circle-test.sh b/Godeps/_workspace/src/github.com/influxdb/influxdb/circle-test.sh deleted file mode 100644 index f4ac8d7db..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/circle-test.sh +++ /dev/null @@ -1,99 +0,0 @@ -#!/bin/bash -# -# This is the InfluxDB CircleCI test script. Using this script allows total control -# over the environment in which the build and test are run, and matches the official -# build process for InfluxDB. - -BUILD_DIR=$HOME/influxdb-build -GO_VERSION=go1.4.2 -PARALLELISM="-parallel 256" -TIMEOUT="-timeout 480s" - -# Executes the given statement, and exits if the command returns a non-zero code. -function exit_if_fail { - command=$@ - echo "Executing '$command'" - $command - rc=$? - if [ $rc -ne 0 ]; then - echo "'$command' returned $rc." - exit $rc - fi -} - -# Check that go fmt has been run.
-function check_go_fmt { - fmtcount=`git ls-files | grep '.go$' | xargs gofmt -l 2>&1 | wc -l` - if [ $fmtcount -gt 0 ]; then - echo "run 'go fmt ./...' to format your source code." - exit 1 - fi -} - -# Check that go vet passes. -function check_go_vet { - # Due to the way composites work, vet will fail for some of our tests so we ignore it - vetcount=`go tool vet --composites=false ./ 2>&1 | wc -l` - if [ $vetcount -gt 0 ]; then - echo "run 'go tool vet --composites=false ./' to see the errors it flags and correct your source code." - exit 1 - fi -} - -source $HOME/.gvm/scripts/gvm -exit_if_fail gvm use $GO_VERSION - -# Set up the build directory, and then GOPATH. -exit_if_fail mkdir $BUILD_DIR -export GOPATH=$BUILD_DIR -exit_if_fail mkdir -p $GOPATH/src/github.com/influxdb - -# Dump some test config to the log. -echo "Test configuration" -echo "========================================" -echo "\$HOME: $HOME" -echo "\$GOPATH: $GOPATH" -echo "\$CIRCLE_BRANCH: $CIRCLE_BRANCH" - -# Move the checked-out source to a better location. -exit_if_fail mv $HOME/influxdb $GOPATH/src/github.com/influxdb -exit_if_fail cd $GOPATH/src/github.com/influxdb/influxdb -exit_if_fail git branch --set-upstream-to=origin/$CIRCLE_BRANCH $CIRCLE_BRANCH - -# Install the code. -exit_if_fail cd $GOPATH/src/github.com/influxdb/influxdb -exit_if_fail go get -t -d -v ./... -exit_if_fail git checkout $CIRCLE_BRANCH # 'go get' switches to master. Who knew? Switch back. -check_go_fmt -check_go_vet -exit_if_fail go build -v ./... - -# Run the tests. -case $CIRCLE_NODE_INDEX in - 0) - go test $PARALLELISM $TIMEOUT -v ./... 2>&1 | tee $CIRCLE_ARTIFACTS/test_logs.txt - rc=${PIPESTATUS[0]} - ;; - 1) - INFLUXDB_DATA_ENGINE="tsm1" go test $PARALLELISM $TIMEOUT -v ./... 2>&1 | tee $CIRCLE_ARTIFACTS/test_logs.txt - rc=${PIPESTATUS[0]} - ;; - 2) - # 32bit tests. - if [[ -e ~/docker/image.tar ]]; then docker load -i ~/docker/image.tar; fi - docker build -f Dockerfile_test_ubuntu32 -t ubuntu-32-influxdb-test . - mkdir -p ~/docker; docker save ubuntu-32-influxdb-test > ~/docker/image.tar - exit_if_fail docker build -f Dockerfile_test_ubuntu32 -t ubuntu-32-influxdb-test . - docker run -v $(pwd):/root/go/src/github.com/influxdb/influxdb -e "CI=${CI}" \ - -v ${CIRCLE_ARTIFACTS}:/tmp/artifacts \ - -t ubuntu-32-influxdb-test bash \ - -c "cd /root/go/src/github.com/influxdb/influxdb && go get -t -d -v ./... && go build -v ./... && go test ${PARALLELISM} ${TIMEOUT} -v ./... 2>&1 | tee /tmp/artifacts/test_logs_i386.txt && exit \${PIPESTATUS[0]}" - rc=$? - ;; - 3) - GORACE="halt_on_error=1" go test $PARALLELISM $TIMEOUT -v -race ./... 
2>&1 | tee $CIRCLE_ARTIFACTS/test_logs_race.txt - rc=${PIPESTATUS[0]} - ;; -esac - -exit $rc diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/circle.yml b/Godeps/_workspace/src/github.com/influxdb/influxdb/circle.yml deleted file mode 100644 index 8f616c96f..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/circle.yml +++ /dev/null @@ -1,16 +0,0 @@ -machine: - services: - - docker - pre: - - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/master/binscripts/gvm-installer) - - source $HOME/.gvm/scripts/gvm; gvm install go1.4.2 --binary - -dependencies: - override: - - mkdir -p ~/docker - cache_directories: - - "~/docker" -test: - override: - - bash circle-test.sh: - parallel: true diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md deleted file mode 100644 index 8a041128d..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/README.md +++ /dev/null @@ -1,256 +0,0 @@ -# InfluxDB Client - -[![GoDoc](https://godoc.org/github.com/influxdb/influxdb?status.svg)](http://godoc.org/github.com/influxdb/influxdb/client/v2) - -## Description - -**NOTE:** The Go client library now has a "v2" version, with the old version -being deprecated. The new version can be imported at -`import "github.com/influxdb/influxdb/client/v2"`. It is not backwards-compatible. - -A Go client library written and maintained by the **InfluxDB** team. -This package provides convenience functions to read and write time series data. -It uses the HTTP protocol to communicate with your **InfluxDB** cluster. - - -## Getting Started - -### Connecting To Your Database - -Connecting to an **InfluxDB** database is straightforward. You will need a host -name, a port and the cluster user credentials if applicable. The default port is -8086. You can customize these settings to your specific installation via the -**InfluxDB** configuration file. - -Though not necessary for experimentation, you may want to create a new user -and authenticate the connection to your database. - -For more information please check out the -[Cluster Admin Docs](http://influxdb.com/docs/v0.9/query_language/database_administration.html). - -For the impatient, you can create a new admin user _bubba_ by firing off the -[InfluxDB CLI](https://github.com/influxdb/influxdb/blob/master/cmd/influx/main.go). - -```shell -influx -> create user bubba with password 'bumblebeetuna' -> grant all privileges to bubba -``` - -And now for good measure set the credentials in your shell environment. -In the example below we will use $INFLUX_USER and $INFLUX_PWD. - -Now with the administrivia out of the way, let's connect to our database. - -NOTE: If you've opted out of creating a user, you can omit Username and Password in -the configuration below.
- -```go -package main - -import ( - "net/url" - "time" - - "github.com/influxdb/influxdb/client/v2" -) - -const ( - MyDB = "square_holes" - username = "bubba" - password = "bumblebeetuna" -) - -func main() { - // Make client - u, _ := url.Parse("http://localhost:8086") - c := client.NewClient(client.Config{ - URL: u, - Username: username, - Password: password, - }) - - // Create a new point batch - bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ - Database: MyDB, - Precision: "s", - }) - - // Create a point and add to batch - tags := map[string]string{"cpu": "cpu-total"} - fields := map[string]interface{}{ - "idle": 10.1, - "system": 53.3, - "user": 46.6, - } - pt, _ := client.NewPoint("cpu_usage", tags, fields, time.Now()) - bp.AddPoint(pt) - - // Write the batch - c.Write(bp) -} - -``` - -### Inserting Data - -Time series data aka *points* are written to the database using batch inserts. -The mechanism is to create one or more points and then create a batch aka -*batch points* and write these to a given database and series. A series is a -combination of a measurement (time/values) and a set of tags. - -In this sample we will create a batch of 1,000 points. Each point has a time, two fields (`idle` and `busy`) and two tags identifying the host and region. We write these points to a database called _systemstats_ using a measurement named _cpu_usage_. - -NOTE: You can specify a RetentionPolicy as part of the batch points. If not -provided, InfluxDB will use the database's _default_ retention policy. - -```go -func writePoints(clnt client.Client) { - sampleSize := 1000 - rand.Seed(42) - - bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ - Database: "systemstats", - Precision: "us", - }) - - for i := 0; i < sampleSize; i++ { - regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"} - tags := map[string]string{ - "cpu": "cpu-total", - "host": fmt.Sprintf("host%d", rand.Intn(1000)), - "region": regions[rand.Intn(len(regions))], - } - - idle := rand.Float64() * 100.0 - fields := map[string]interface{}{ - "idle": idle, - "busy": 100.0 - idle, - } - - pt, _ := client.NewPoint( - "cpu_usage", - tags, - fields, - time.Now(), - ) - bp.AddPoint(pt) - } - - err := clnt.Write(bp) - if err != nil { - log.Fatal(err) - } -} -``` - - -### Querying Data - -One nice advantage of using **InfluxDB** is the ability to query your data using familiar -SQL constructs.
In this example we can create a convenience function to query the database -as follows: - -```go -// queryDB is a convenience function to query the database -func queryDB(clnt client.Client, cmd string) (res []client.Result, err error) { - q := client.Query{ - Command: cmd, - Database: MyDB, - } - if response, err := clnt.Query(q); err == nil { - if response.Error() != nil { - return res, response.Error() - } - res = response.Results - } - return res, nil -} -``` - -#### Creating a Database - -```go -_, err := queryDB(clnt, fmt.Sprintf("CREATE DATABASE %s", MyDB)) -if err != nil { - log.Fatal(err) -} -``` - -#### Count Records - -```go -q := fmt.Sprintf("SELECT count(%s) FROM %s", "value", MyMeasurement) -res, err := queryDB(clnt, q) -if err != nil { - log.Fatal(err) -} -count := res[0].Series[0].Values[0][1] -log.Printf("Found a total of %v records\n", count) -``` - -#### Find the last 10 _shapes_ records - -```go -q := fmt.Sprintf("SELECT * FROM %s LIMIT %d", MyMeasurement, 10) -res, err := queryDB(clnt, q) -if err != nil { - log.Fatal(err) -} - -for i, row := range res[0].Series[0].Values { - t, err := time.Parse(time.RFC3339, row[0].(string)) - if err != nil { - log.Fatal(err) - } - val := row[1] - log.Printf("[%2d] %s: %v\n", i, t.Format(time.Stamp), val) -} -``` - -### Using the UDP Client - -The **InfluxDB** client also supports writing over UDP. - -```go -func WriteUDP() { - // Make client - c := client.NewUDPClient("localhost:8089") - - // Create a new point batch - bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ - Precision: "s", - }) - - // Create a point and add to batch - tags := map[string]string{"cpu": "cpu-total"} - fields := map[string]interface{}{ - "idle": 10.1, - "system": 53.3, - "user": 46.6, - } - pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) - if err != nil { - panic(err.Error()) - } - bp.AddPoint(pt) - - // Write the batch - c.Write(bp) -} -``` - -## Go Docs - -Please refer to -[http://godoc.org/github.com/influxdb/influxdb/client/v2](http://godoc.org/github.com/influxdb/influxdb/client/v2) -for documentation. - -## See Also - -You can also examine how the client library is used by the -[InfluxDB CLI](https://github.com/influxdb/influxdb/blob/master/cmd/influx/main.go). diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/example_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/example_test.go deleted file mode 100644 index e2dc10b71..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/example_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package client_test - -import ( - "fmt" - "log" - "math/rand" - "net/url" - "os" - "strconv" - "time" - - "github.com/influxdb/influxdb/client" -) - -func ExampleNewClient() { - host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) - if err != nil { - log.Fatal(err) - } - - // NOTE: this assumes you've set up a user and have set up shell env variables, - // namely INFLUX_USER/INFLUX_PWD. If not, just omit Username/Password below.
- conf := client.Config{ - URL: *host, - Username: os.Getenv("INFLUX_USER"), - Password: os.Getenv("INFLUX_PWD"), - } - con, err := client.NewClient(conf) - if err != nil { - log.Fatal(err) - } - log.Println("Connection", con) -} - -func ExampleClient_Ping() { - host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) - if err != nil { - log.Fatal(err) - } - con, err := client.NewClient(client.Config{URL: *host}) - if err != nil { - log.Fatal(err) - } - - dur, ver, err := con.Ping() - if err != nil { - log.Fatal(err) - } - log.Printf("Happy as a hippo! %v, %s", dur, ver) -} - -func ExampleClient_Query() { - host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) - if err != nil { - log.Fatal(err) - } - con, err := client.NewClient(client.Config{URL: *host}) - if err != nil { - log.Fatal(err) - } - - q := client.Query{ - Command: "select count(value) from shapes", - Database: "square_holes", - } - if response, err := con.Query(q); err == nil && response.Error() == nil { - log.Println(response.Results) - } -} - -func ExampleClient_Write() { - host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) - if err != nil { - log.Fatal(err) - } - con, err := client.NewClient(client.Config{URL: *host}) - if err != nil { - log.Fatal(err) - } - - var ( - shapes = []string{"circle", "rectangle", "square", "triangle"} - colors = []string{"red", "blue", "green"} - sampleSize = 1000 - pts = make([]client.Point, sampleSize) - ) - - rand.Seed(42) - for i := 0; i < sampleSize; i++ { - pts[i] = client.Point{ - Measurement: "shapes", - Tags: map[string]string{ - "color": strconv.Itoa(rand.Intn(len(colors))), - "shape": strconv.Itoa(rand.Intn(len(shapes))), - }, - Fields: map[string]interface{}{ - "value": rand.Intn(sampleSize), - }, - Time: time.Now(), - Precision: "s", - } - } - - bps := client.BatchPoints{ - Points: pts, - Database: "BumbeBeeTuna", - RetentionPolicy: "default", - } - _, err = con.Write(bps) - if err != nil { - log.Fatal(err) - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go deleted file mode 100644 index 9e0d72717..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb.go +++ /dev/null @@ -1,688 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/influxdb/influxdb/models" -) - -const ( - // DefaultHost is the default host used to connect to an InfluxDB instance - DefaultHost = "localhost" - - // DefaultPort is the default port used to connect to an InfluxDB instance - DefaultPort = 8086 - - // DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance - DefaultTimeout = 0 -) - -// Query is used to send a command to the server. Both Command and Database are required. 
-type Query struct { - Command string - Database string -} - -// ParseConnectionString will parse a string to create a valid connection URL -func ParseConnectionString(path string, ssl bool) (url.URL, error) { - var host string - var port int - - h, p, err := net.SplitHostPort(path) - if err != nil { - if path == "" { - host = DefaultHost - } else { - host = path - } - // If they didn't specify a port, always use the default port - port = DefaultPort - } else { - host = h - port, err = strconv.Atoi(p) - if err != nil { - return url.URL{}, fmt.Errorf("invalid port number %q: %s\n", path, err) - } - } - - u := url.URL{ - Scheme: "http", - } - if ssl { - u.Scheme = "https" - } - - u.Host = net.JoinHostPort(host, strconv.Itoa(port)) - - return u, nil -} - -// Config is used to specify what server to connect to. -// URL: The URL of the server connecting to. -// Username/Password are optional. They will be passed via basic auth if provided. -// UserAgent: If not provided, will default "InfluxDBClient", -// Timeout: If not provided, will default to 0 (no timeout) -type Config struct { - URL url.URL - Username string - Password string - UserAgent string - Timeout time.Duration - Precision string -} - -// NewConfig will create a config to be used in connecting to the client -func NewConfig() Config { - return Config{ - Timeout: DefaultTimeout, - } -} - -// Client is used to make calls to the server. -type Client struct { - url url.URL - username string - password string - httpClient *http.Client - userAgent string - precision string -} - -const ( - // ConsistencyOne requires at least one data node acknowledged a write. - ConsistencyOne = "one" - - // ConsistencyAll requires all data nodes to acknowledge a write. - ConsistencyAll = "all" - - // ConsistencyQuorum requires a quorum of data nodes to acknowledge a write. - ConsistencyQuorum = "quorum" - - // ConsistencyAny allows for hinted hand off, potentially no write happened yet. - ConsistencyAny = "any" -) - -// NewClient will instantiate and return a connected client to issue commands to the server. 
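For illustration, `ParseConnectionString` pairs naturally with `NewClient` below. A sketch under stated assumptions — the host value is invented, and the snippet omits its `package`/`import` boilerplate like the README fragments above:

```go
// "localhost" carries no port, so ParseConnectionString fills in
// DefaultPort (8086); ssl=false selects the plain http scheme.
u, err := client.ParseConnectionString("localhost", false)
if err != nil {
	log.Fatal(err)
}
fmt.Println(u.String()) // http://localhost:8086

con, err := client.NewClient(client.Config{URL: u})
if err != nil {
	log.Fatal(err)
}
_ = con
```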
-func NewClient(c Config) (*Client, error) { - client := Client{ - url: c.URL, - username: c.Username, - password: c.Password, - httpClient: &http.Client{Timeout: c.Timeout}, - userAgent: c.UserAgent, - precision: c.Precision, - } - if client.userAgent == "" { - client.userAgent = "InfluxDBClient" - } - return &client, nil -} - -// SetAuth will update the username and passwords -func (c *Client) SetAuth(u, p string) { - c.username = u - c.password = p -} - -// SetPrecision will update the precision -func (c *Client) SetPrecision(precision string) { - c.precision = precision -} - -// Query sends a command to the server and returns the Response -func (c *Client) Query(q Query) (*Response, error) { - u := c.url - - u.Path = "query" - values := u.Query() - values.Set("q", q.Command) - values.Set("db", q.Database) - if c.precision != "" { - values.Set("epoch", c.precision) - } - u.RawQuery = values.Encode() - - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - return nil, err - } - req.Header.Set("User-Agent", c.userAgent) - if c.username != "" { - req.SetBasicAuth(c.username, c.password) - } - - resp, err := c.httpClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var response Response - dec := json.NewDecoder(resp.Body) - dec.UseNumber() - decErr := dec.Decode(&response) - - // ignore this error if we got an invalid status code - if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK { - decErr = nil - } - // If we got a valid decode error, send that back - if decErr != nil { - return nil, decErr - } - // If we don't have an error in our json response, and didn't get StatusOK, then send back an error - if resp.StatusCode != http.StatusOK && response.Error() == nil { - return &response, fmt.Errorf("received status code %d from server", resp.StatusCode) - } - return &response, nil -} - -// Write takes BatchPoints and allows for writing of multiple points with defaults -// If successful, error is nil and Response is nil -// If an error occurs, Response may contain additional information if populated. 
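A hedged usage sketch for the precision setter (`con`, the database, and the measurement are carried over from the examples above): once a precision is set, query results come back with epoch timestamps rather than RFC3339 strings.

```go
con.SetPrecision("s") // ask the server for epoch-second timestamps (the "epoch" URL param)
q := client.Query{
	Command:  "SELECT count(value) FROM shapes",
	Database: "square_holes",
}
if response, err := con.Query(q); err == nil && response.Error() == nil {
	log.Println(response.Results)
}
```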
-func (c *Client) Write(bp BatchPoints) (*Response, error) { - u := c.url - u.Path = "write" - - var b bytes.Buffer - for _, p := range bp.Points { - if p.Raw != "" { - if _, err := b.WriteString(p.Raw); err != nil { - return nil, err - } - } else { - for k, v := range bp.Tags { - if p.Tags == nil { - p.Tags = make(map[string]string, len(bp.Tags)) - } - p.Tags[k] = v - } - - if _, err := b.WriteString(p.MarshalString()); err != nil { - return nil, err - } - } - - if err := b.WriteByte('\n'); err != nil { - return nil, err - } - } - - req, err := http.NewRequest("POST", u.String(), &b) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "") - req.Header.Set("User-Agent", c.userAgent) - if c.username != "" { - req.SetBasicAuth(c.username, c.password) - } - - precision := bp.Precision - if precision == "" { - precision = c.precision - } - - params := req.URL.Query() - params.Set("db", bp.Database) - params.Set("rp", bp.RetentionPolicy) - params.Set("precision", precision) - params.Set("consistency", bp.WriteConsistency) - req.URL.RawQuery = params.Encode() - - resp, err := c.httpClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var response Response - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { - var err = fmt.Errorf(string(body)) - response.Err = err - return &response, err - } - - return nil, nil -} - -// WriteLineProtocol takes a string with line returns to delimit each write -// If successful, error is nil and Response is nil -// If an error occurs, Response may contain additional information if populated. -func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) { - u := c.url - u.Path = "write" - - r := strings.NewReader(data) - - req, err := http.NewRequest("POST", u.String(), r) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "") - req.Header.Set("User-Agent", c.userAgent) - if c.username != "" { - req.SetBasicAuth(c.username, c.password) - } - params := req.URL.Query() - params.Set("db", database) - params.Set("rp", retentionPolicy) - params.Set("precision", precision) - params.Set("consistency", writeConsistency) - req.URL.RawQuery = params.Encode() - - resp, err := c.httpClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var response Response - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { - err := fmt.Errorf(string(body)) - response.Err = err - return &response, err - } - - return nil, nil -} - -// Ping will check to see if the server is up -// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. -func (c *Client) Ping() (time.Duration, string, error) { - now := time.Now() - u := c.url - u.Path = "ping" - - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - return 0, "", err - } - req.Header.Set("User-Agent", c.userAgent) - if c.username != "" { - req.SetBasicAuth(c.username, c.password) - } - - resp, err := c.httpClient.Do(req) - if err != nil { - return 0, "", err - } - defer resp.Body.Close() - - version := resp.Header.Get("X-Influxdb-Version") - return time.Since(now), version, nil -} - -// Structs - -// Result represents a resultset returned from a single statement. 
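`WriteLineProtocol` skips `Point` structs entirely and ships a pre-formatted string. A sketch — the database, retention policy, and values are invented, and `con` is a `*client.Client` as above:

```go
// One line-protocol point per line; the trailing timestamp is in nanoseconds,
// matching the "n" precision passed below.
data := "cpu_usage,host=server01,region=us-west idle=89.3 1434055562000000000"
resp, err := con.WriteLineProtocol(data, "mydb", "default", "n", client.ConsistencyOne)
if err != nil {
	log.Fatal(err) // on failure, resp may also carry the server's error text
}
_ = resp
```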
-type Result struct { - Series []models.Row - Err error -} - -// MarshalJSON encodes the result into JSON. -func (r *Result) MarshalJSON() ([]byte, error) { - // Define a struct that outputs "error" as a string. - var o struct { - Series []models.Row `json:"series,omitempty"` - Err string `json:"error,omitempty"` - } - - // Copy fields to output struct. - o.Series = r.Series - if r.Err != nil { - o.Err = r.Err.Error() - } - - return json.Marshal(&o) -} - -// UnmarshalJSON decodes the data into the Result struct -func (r *Result) UnmarshalJSON(b []byte) error { - var o struct { - Series []models.Row `json:"series,omitempty"` - Err string `json:"error,omitempty"` - } - - dec := json.NewDecoder(bytes.NewBuffer(b)) - dec.UseNumber() - err := dec.Decode(&o) - if err != nil { - return err - } - r.Series = o.Series - if o.Err != "" { - r.Err = errors.New(o.Err) - } - return nil -} - -// Response represents a list of statement results. -type Response struct { - Results []Result - Err error -} - -// MarshalJSON encodes the response into JSON. -func (r *Response) MarshalJSON() ([]byte, error) { - // Define a struct that outputs "error" as a string. - var o struct { - Results []Result `json:"results,omitempty"` - Err string `json:"error,omitempty"` - } - - // Copy fields to output struct. - o.Results = r.Results - if r.Err != nil { - o.Err = r.Err.Error() - } - - return json.Marshal(&o) -} - -// UnmarshalJSON decodes the data into the Response struct -func (r *Response) UnmarshalJSON(b []byte) error { - var o struct { - Results []Result `json:"results,omitempty"` - Err string `json:"error,omitempty"` - } - - dec := json.NewDecoder(bytes.NewBuffer(b)) - dec.UseNumber() - err := dec.Decode(&o) - if err != nil { - return err - } - r.Results = o.Results - if o.Err != "" { - r.Err = errors.New(o.Err) - } - return nil -} - -// Error returns the first error from any statement. -// Returns nil if no errors occurred on any statements. -func (r Response) Error() error { - if r.Err != nil { - return r.Err - } - for _, result := range r.Results { - if result.Err != nil { - return result.Err - } - } - return nil -} - -// Point defines the fields that will be written to the database -// Measurement, Time, and Fields are required -// Precision can be specified if the time is in epoch format (integer). -// Valid values for Precision are n, u, ms, s, m, and h -type Point struct { - Measurement string - Tags map[string]string - Time time.Time - Fields map[string]interface{} - Precision string - Raw string -} - -// MarshalJSON will format the time in RFC3339Nano -// Precision is also ignored as it is only used for writing, not reading -// Or another way to say it is we always send back in nanosecond precision -func (p *Point) MarshalJSON() ([]byte, error) { - point := struct { - Measurement string `json:"measurement,omitempty"` - Tags map[string]string `json:"tags,omitempty"` - Time string `json:"time,omitempty"` - Fields map[string]interface{} `json:"fields,omitempty"` - Precision string `json:"precision,omitempty"` - }{ - Measurement: p.Measurement, - Tags: p.Tags, - Fields: p.Fields, - Precision: p.Precision, - } - // Let it omit empty if it's really zero - if !p.Time.IsZero() { - point.Time = p.Time.UTC().Format(time.RFC3339Nano) - } - return json.Marshal(&point) -} - -// MarshalString renders string representation of a Point with specified -// precision. The default precision is nanoseconds. 
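To see the wire format these marshaling methods target, here is a sketch that decodes a hand-written `/query` response body (the payload is invented):

```go
body := []byte(`{"results":[{"series":[{"name":"cpu",
	"columns":["time","value"],"values":[[1434055562,0.64]]}]}]}`)
var r client.Response
if err := json.Unmarshal(body, &r); err != nil {
	log.Fatal(err)
}
// UnmarshalJSON decodes with UseNumber, so 0.64 arrives as a json.Number.
fmt.Println(r.Results[0].Series[0].Values[0][1]) // 0.64
```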
-func (p *Point) MarshalString() string { - pt, err := models.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time) - if err != nil { - return "# ERROR: " + err.Error() + " " + p.Measurement - } - if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" { - return pt.String() - } - return pt.PrecisionString(p.Precision) -} - -// UnmarshalJSON decodes the data into the Point struct -func (p *Point) UnmarshalJSON(b []byte) error { - var normal struct { - Measurement string `json:"measurement"` - Tags map[string]string `json:"tags"` - Time time.Time `json:"time"` - Precision string `json:"precision"` - Fields map[string]interface{} `json:"fields"` - } - var epoch struct { - Measurement string `json:"measurement"` - Tags map[string]string `json:"tags"` - Time *int64 `json:"time"` - Precision string `json:"precision"` - Fields map[string]interface{} `json:"fields"` - } - - if err := func() error { - var err error - dec := json.NewDecoder(bytes.NewBuffer(b)) - dec.UseNumber() - if err = dec.Decode(&epoch); err != nil { - return err - } - // Convert from epoch to time.Time, but only if Time - // was actually set. - var ts time.Time - if epoch.Time != nil { - ts, err = EpochToTime(*epoch.Time, epoch.Precision) - if err != nil { - return err - } - } - p.Measurement = epoch.Measurement - p.Tags = epoch.Tags - p.Time = ts - p.Precision = epoch.Precision - p.Fields = normalizeFields(epoch.Fields) - return nil - }(); err == nil { - return nil - } - - dec := json.NewDecoder(bytes.NewBuffer(b)) - dec.UseNumber() - if err := dec.Decode(&normal); err != nil { - return err - } - normal.Time = SetPrecision(normal.Time, normal.Precision) - p.Measurement = normal.Measurement - p.Tags = normal.Tags - p.Time = normal.Time - p.Precision = normal.Precision - p.Fields = normalizeFields(normal.Fields) - - return nil -} - -// Remove any notion of json.Number -func normalizeFields(fields map[string]interface{}) map[string]interface{} { - newFields := map[string]interface{}{} - - for k, v := range fields { - switch v := v.(type) { - case json.Number: - jv, e := v.Float64() - if e != nil { - panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e)) - } - newFields[k] = jv - default: - newFields[k] = v - } - } - return newFields -} - -// BatchPoints is used to send batched data in a single write. -// Database and Points are required -// If no retention policy is specified, it will use the databases default retention policy. -// If tags are specified, they will be "merged" with all points. If a point already has that tag, it will be ignored. -// If time is specified, it will be applied to any point with an empty time. -// Precision can be specified if the time is in epoch format (integer). 
-// Valid values for Precision are n, u, ms, s, m, and h -type BatchPoints struct { - Points []Point `json:"points,omitempty"` - Database string `json:"database,omitempty"` - RetentionPolicy string `json:"retentionPolicy,omitempty"` - Tags map[string]string `json:"tags,omitempty"` - Time time.Time `json:"time,omitempty"` - Precision string `json:"precision,omitempty"` - WriteConsistency string `json:"-"` -} - -// UnmarshalJSON decodes the data into the BatchPoints struct -func (bp *BatchPoints) UnmarshalJSON(b []byte) error { - var normal struct { - Points []Point `json:"points"` - Database string `json:"database"` - RetentionPolicy string `json:"retentionPolicy"` - Tags map[string]string `json:"tags"` - Time time.Time `json:"time"` - Precision string `json:"precision"` - } - var epoch struct { - Points []Point `json:"points"` - Database string `json:"database"` - RetentionPolicy string `json:"retentionPolicy"` - Tags map[string]string `json:"tags"` - Time *int64 `json:"time"` - Precision string `json:"precision"` - } - - if err := func() error { - var err error - if err = json.Unmarshal(b, &epoch); err != nil { - return err - } - // Convert from epoch to time.Time - var ts time.Time - if epoch.Time != nil { - ts, err = EpochToTime(*epoch.Time, epoch.Precision) - if err != nil { - return err - } - } - bp.Points = epoch.Points - bp.Database = epoch.Database - bp.RetentionPolicy = epoch.RetentionPolicy - bp.Tags = epoch.Tags - bp.Time = ts - bp.Precision = epoch.Precision - return nil - }(); err == nil { - return nil - } - - if err := json.Unmarshal(b, &normal); err != nil { - return err - } - normal.Time = SetPrecision(normal.Time, normal.Precision) - bp.Points = normal.Points - bp.Database = normal.Database - bp.RetentionPolicy = normal.RetentionPolicy - bp.Tags = normal.Tags - bp.Time = normal.Time - bp.Precision = normal.Precision - - return nil -} - -// utility functions - -// Addr provides the current url as a string of the server the client is connected to. 
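As a sketch of the epoch path described above (database, field names, and values invented), a JSON batch with integer times decodes into proper `time.Time` values:

```go
var bp client.BatchPoints
data := []byte(`{"database":"mydb","retentionPolicy":"default",
 "points":[{"measurement":"cpu","time":1434055562,"precision":"s",
  "fields":{"value":0.64}}]}`)
if err := json.Unmarshal(data, &bp); err != nil {
	log.Fatal(err)
}
fmt.Println(bp.Points[0].Time.UTC()) // 2015-06-11 20:46:02 +0000 UTC
```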
-func (c *Client) Addr() string { - return c.url.String() -} - -// helper functions - -// EpochToTime takes a unix epoch time and uses precision to return back a time.Time -func EpochToTime(epoch int64, precision string) (time.Time, error) { - if precision == "" { - precision = "s" - } - var t time.Time - switch precision { - case "h": - t = time.Unix(0, epoch*int64(time.Hour)) - case "m": - t = time.Unix(0, epoch*int64(time.Minute)) - case "s": - t = time.Unix(0, epoch*int64(time.Second)) - case "ms": - t = time.Unix(0, epoch*int64(time.Millisecond)) - case "u": - t = time.Unix(0, epoch*int64(time.Microsecond)) - case "n": - t = time.Unix(0, epoch) - default: - return time.Time{}, fmt.Errorf("Unknown precision %q", precision) - } - return t, nil -} - -// SetPrecision will round a time to the specified precision -func SetPrecision(t time.Time, precision string) time.Time { - switch precision { - case "n": - case "u": - return t.Round(time.Microsecond) - case "ms": - return t.Round(time.Millisecond) - case "s": - return t.Round(time.Second) - case "m": - return t.Round(time.Minute) - case "h": - return t.Round(time.Hour) - } - return t -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb_test.go deleted file mode 100644 index 0a1981c83..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/influxdb_test.go +++ /dev/null @@ -1,560 +0,0 @@ -package client_test - -import ( - "encoding/json" - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "strings" - "testing" - "time" - - "github.com/influxdb/influxdb/client" -) - -func BenchmarkUnmarshalJSON2Tags(b *testing.B) { - var bp client.BatchPoints - data := []byte(` -{ - "database": "foo", - "retentionPolicy": "bar", - "points": [ - { - "name": "cpu", - "tags": { - "host": "server01", - "region": "us-east1" - }, - "time": 14244733039069373, - "precision": "n", - "fields": { - "value": 4541770385657154000 - } - } - ] -} -`) - - for i := 0; i < b.N; i++ { - if err := json.Unmarshal(data, &bp); err != nil { - b.Errorf("unable to unmarshal nanosecond data: %s", err.Error()) - } - b.SetBytes(int64(len(data))) - } -} - -func BenchmarkUnmarshalJSON10Tags(b *testing.B) { - var bp client.BatchPoints - data := []byte(` -{ - "database": "foo", - "retentionPolicy": "bar", - "points": [ - { - "name": "cpu", - "tags": { - "host": "server01", - "region": "us-east1", - "tag1": "value1", - "tag2": "value2", - "tag2": "value3", - "tag4": "value4", - "tag5": "value5", - "tag6": "value6", - "tag7": "value7", - "tag8": "value8" - }, - "time": 14244733039069373, - "precision": "n", - "fields": { - "value": 4541770385657154000 - } - } - ] -} -`) - - for i := 0; i < b.N; i++ { - if err := json.Unmarshal(data, &bp); err != nil { - b.Errorf("unable to unmarshal nanosecond data: %s", err.Error()) - } - b.SetBytes(int64(len(data))) - } -} - -func TestNewClient(t *testing.T) { - config := client.Config{} - _, err := client.NewClient(config) - if err != nil { - t.Fatalf("unexpected error. expected %v, actual %v", nil, err) - } -} - -func TestClient_Ping(t *testing.T) { - ts := emptyTestServer() - defer ts.Close() - - u, _ := url.Parse(ts.URL) - config := client.Config{URL: *u} - c, err := client.NewClient(config) - if err != nil { - t.Fatalf("unexpected error. expected %v, actual %v", nil, err) - } - d, version, err := c.Ping() - if err != nil { - t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) - } - if d == 0 { - t.Fatalf("expected a duration greater than zero. actual %v", d) - } - if version != "x.x" { - t.Fatalf("unexpected version. expected %s, actual %v", "x.x", version) - } -} - -func TestClient_Query(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var data client.Response - w.WriteHeader(http.StatusOK) - _ = json.NewEncoder(w).Encode(data) - })) - defer ts.Close() - - u, _ := url.Parse(ts.URL) - config := client.Config{URL: *u} - c, err := client.NewClient(config) - if err != nil { - t.Fatalf("unexpected error. expected %v, actual %v", nil, err) - } - - query := client.Query{} - _, err = c.Query(query) - if err != nil { - t.Fatalf("unexpected error. expected %v, actual %v", nil, err) - } -} - -func TestClient_BasicAuth(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - u, p, ok := r.BasicAuth() - - if !ok { - t.Errorf("basic auth error") - } - if u != "username" { - t.Errorf("unexpected username, expected %q, actual %q", "username", u) - } - if p != "password" { - t.Errorf("unexpected password, expected %q, actual %q", "password", p) - } - w.WriteHeader(http.StatusNoContent) - })) - defer ts.Close() - - u, _ := url.Parse(ts.URL) - u.User = url.UserPassword("username", "password") - config := client.Config{URL: *u, Username: "username", Password: "password"} - c, err := client.NewClient(config) - if err != nil { - t.Fatalf("unexpected error. expected %v, actual %v", nil, err) - } - - _, _, err = c.Ping() - if err != nil { - t.Fatalf("unexpected error. expected %v, actual %v", nil, err) - } -} - -func TestClient_Write(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var data client.Response - w.WriteHeader(http.StatusNoContent) - _ = json.NewEncoder(w).Encode(data) - })) - defer ts.Close() - - u, _ := url.Parse(ts.URL) - config := client.Config{URL: *u} - c, err := client.NewClient(config) - if err != nil { - t.Fatalf("unexpected error. expected %v, actual %v", nil, err) - } - - bp := client.BatchPoints{} - r, err := c.Write(bp) - if err != nil { - t.Fatalf("unexpected error. expected %v, actual %v", nil, err) - } - if r != nil { - t.Fatalf("unexpected response. expected %v, actual %v", nil, r) - } -} - -func TestClient_UserAgent(t *testing.T) { - receivedUserAgent := "" - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - receivedUserAgent = r.UserAgent() - - var data client.Response - w.WriteHeader(http.StatusOK) - _ = json.NewEncoder(w).Encode(data) - })) - defer ts.Close() - - _, err := http.Get(ts.URL) - if err != nil { - t.Fatalf("unexpected error. expected %v, actual %v", nil, err) - } - - tests := []struct { - name string - userAgent string - expected string - }{ - { - name: "Empty user agent", - userAgent: "", - expected: "InfluxDBClient", - }, - { - name: "Custom user agent", - userAgent: "Test Influx Client", - expected: "Test Influx Client", - }, - } - - for _, test := range tests { - u, _ := url.Parse(ts.URL) - config := client.Config{URL: *u, UserAgent: test.userAgent} - c, err := client.NewClient(config) - if err != nil { - t.Fatalf("unexpected error. expected %v, actual %v", nil, err) - } - - receivedUserAgent = "" - query := client.Query{} - _, err = c.Query(query) - if err != nil { - t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) - } - if !strings.HasPrefix(receivedUserAgent, test.expected) { - t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) - } - - receivedUserAgent = "" - bp := client.BatchPoints{} - _, err = c.Write(bp) - if err != nil { - t.Fatalf("unexpected error. expected %v, actual %v", nil, err) - } - if !strings.HasPrefix(receivedUserAgent, test.expected) { - t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) - } - - receivedUserAgent = "" - _, _, err = c.Ping() - if err != nil { - t.Fatalf("unexpected error. expected %v, actual %v", nil, err) - } - if receivedUserAgent != test.expected { - t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) - } - } -} - -func TestPoint_UnmarshalEpoch(t *testing.T) { - now := time.Now() - tests := []struct { - name string - epoch int64 - precision string - expected time.Time - }{ - { - name: "nanoseconds", - epoch: now.UnixNano(), - precision: "n", - expected: now, - }, - { - name: "microseconds", - epoch: now.Round(time.Microsecond).UnixNano() / int64(time.Microsecond), - precision: "u", - expected: now.Round(time.Microsecond), - }, - { - name: "milliseconds", - epoch: now.Round(time.Millisecond).UnixNano() / int64(time.Millisecond), - precision: "ms", - expected: now.Round(time.Millisecond), - }, - { - name: "seconds", - epoch: now.Round(time.Second).UnixNano() / int64(time.Second), - precision: "s", - expected: now.Round(time.Second), - }, - { - name: "minutes", - epoch: now.Round(time.Minute).UnixNano() / int64(time.Minute), - precision: "m", - expected: now.Round(time.Minute), - }, - { - name: "hours", - epoch: now.Round(time.Hour).UnixNano() / int64(time.Hour), - precision: "h", - expected: now.Round(time.Hour), - }, - { - name: "max int64", - epoch: 9223372036854775807, - precision: "n", - expected: time.Unix(0, 9223372036854775807), - }, - { - name: "100 years from now", - epoch: now.Add(time.Hour * 24 * 365 * 100).UnixNano(), - precision: "n", - expected: now.Add(time.Hour * 24 * 365 * 100), - }, - } - - for _, test := range tests { - t.Logf("testing %q\n", test.name) - data := []byte(fmt.Sprintf(`{"time": %d, "precision":"%s"}`, test.epoch, test.precision)) - t.Logf("json: %s", string(data)) - var p client.Point - err := json.Unmarshal(data, &p) - if err != nil { - t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err) - } - if !p.Time.Equal(test.expected) { - t.Fatalf("Unexpected time. expected: %v, actual: %v", test.expected, p.Time) - } - } -} - -func TestPoint_UnmarshalRFC(t *testing.T) { - now := time.Now().UTC() - tests := []struct { - name string - rfc string - now time.Time - expected time.Time - }{ - { - name: "RFC3339Nano", - rfc: time.RFC3339Nano, - now: now, - expected: now, - }, - { - name: "RFC3339", - rfc: time.RFC3339, - now: now.Round(time.Second), - expected: now.Round(time.Second), - }, - } - - for _, test := range tests { - t.Logf("testing %q\n", test.name) - ts := test.now.Format(test.rfc) - data := []byte(fmt.Sprintf(`{"time": %q}`, ts)) - t.Logf("json: %s", string(data)) - var p client.Point - err := json.Unmarshal(data, &p) - if err != nil { - t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err) - } - if !p.Time.Equal(test.expected) { - t.Fatalf("Unexpected time. 
expected: %v, actual: %v", test.expected, p.Time) - } - } -} - -func TestPoint_MarshalOmitempty(t *testing.T) { - now := time.Now().UTC() - tests := []struct { - name string - point client.Point - now time.Time - expected string - }{ - { - name: "all empty", - point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}}, - now: now, - expected: `{"measurement":"cpu","fields":{"value":1.1}}`, - }, - { - name: "with time", - point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Time: now}, - now: now, - expected: fmt.Sprintf(`{"measurement":"cpu","time":"%s","fields":{"value":1.1}}`, now.Format(time.RFC3339Nano)), - }, - { - name: "with tags", - point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Tags: map[string]string{"foo": "bar"}}, - now: now, - expected: `{"measurement":"cpu","tags":{"foo":"bar"},"fields":{"value":1.1}}`, - }, - { - name: "with precision", - point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Precision: "ms"}, - now: now, - expected: `{"measurement":"cpu","fields":{"value":1.1},"precision":"ms"}`, - }, - } - - for _, test := range tests { - t.Logf("testing %q\n", test.name) - b, err := json.Marshal(&test.point) - if err != nil { - t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err) - } - if test.expected != string(b) { - t.Fatalf("Unexpected result. expected: %v, actual: %v", test.expected, string(b)) - } - } -} - -func TestEpochToTime(t *testing.T) { - now := time.Now() - - tests := []struct { - name string - epoch int64 - precision string - expected time.Time - }{ - {name: "nanoseconds", epoch: now.UnixNano(), precision: "n", expected: now}, - {name: "microseconds", epoch: now.Round(time.Microsecond).UnixNano() / int64(time.Microsecond), precision: "u", expected: now.Round(time.Microsecond)}, - {name: "milliseconds", epoch: now.Round(time.Millisecond).UnixNano() / int64(time.Millisecond), precision: "ms", expected: now.Round(time.Millisecond)}, - {name: "seconds", epoch: now.Round(time.Second).UnixNano() / int64(time.Second), precision: "s", expected: now.Round(time.Second)}, - {name: "minutes", epoch: now.Round(time.Minute).UnixNano() / int64(time.Minute), precision: "m", expected: now.Round(time.Minute)}, - {name: "hours", epoch: now.Round(time.Hour).UnixNano() / int64(time.Hour), precision: "h", expected: now.Round(time.Hour)}, - } - - for _, test := range tests { - t.Logf("testing %q\n", test.name) - tm, e := client.EpochToTime(test.epoch, test.precision) - if e != nil { - t.Fatalf("unexpected error: expected %v, actual: %v", nil, e) - } - if tm != test.expected { - t.Fatalf("unexpected time: expected %v, actual %v", test.expected, tm) - } - } -} - -// helper functions - -func emptyTestServer() *httptest.Server { - return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("X-Influxdb-Version", "x.x") - return - })) -} - -// Ensure that data with epoch times can be decoded. 
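A small sketch of the two helpers these tests exercise (the epoch value is invented):

```go
t1, err := client.EpochToTime(1434055562, "s")
if err != nil {
	log.Fatal(err)
}
fmt.Println(t1.UTC()) // 2015-06-11 20:46:02 +0000 UTC

// SetPrecision rounds rather than truncates: 600µs rounds up to 1ms.
fmt.Println(client.SetPrecision(t1.Add(600*time.Microsecond), "ms").UTC())
// 2015-06-11 20:46:02.001 +0000 UTC
```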
-func TestBatchPoints_Normal(t *testing.T) { - var bp client.BatchPoints - data := []byte(` -{ - "database": "foo", - "retentionPolicy": "bar", - "points": [ - { - "name": "cpu", - "tags": { - "host": "server01" - }, - "time": 14244733039069373, - "precision": "n", - "values": { - "value": 4541770385657154000 - } - }, - { - "name": "cpu", - "tags": { - "host": "server01" - }, - "time": 14244733039069380, - "precision": "n", - "values": { - "value": 7199311900554737000 - } - } - ] -} -`) - - if err := json.Unmarshal(data, &bp); err != nil { - t.Errorf("unable to unmarshal nanosecond data: %s", err.Error()) - } -} - -func TestClient_Timeout(t *testing.T) { - done := make(chan bool) - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - <-done - })) - defer ts.Close() - defer func() { done <- true }() - - u, _ := url.Parse(ts.URL) - config := client.Config{URL: *u, Timeout: 500 * time.Millisecond} - c, err := client.NewClient(config) - if err != nil { - t.Fatalf("unexpected error. expected %v, actual %v", nil, err) - } - - query := client.Query{} - _, err = c.Query(query) - if err == nil { - t.Fatalf("unexpected success. expected timeout error") - } else if !strings.Contains(err.Error(), "request canceled") && - !strings.Contains(err.Error(), "use of closed network connection") { - t.Fatalf("unexpected error. expected 'request canceled' error, got %v", err) - } -} - -func TestClient_NoTimeout(t *testing.T) { - if testing.Short() { - t.Skip("skipping in short mode") - } - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - time.Sleep(1 * time.Second) - var data client.Response - w.WriteHeader(http.StatusOK) - _ = json.NewEncoder(w).Encode(data) - })) - defer ts.Close() - - u, _ := url.Parse(ts.URL) - config := client.Config{URL: *u} - c, err := client.NewClient(config) - if err != nil { - t.Fatalf("unexpected error. expected %v, actual %v", nil, err) - } - - query := client.Query{} - _, err = c.Query(query) - if err != nil { - t.Fatalf("unexpected error. expected %v, actual %v", nil, err) - } -} - -func TestClient_ParseConnectionString_IPv6(t *testing.T) { - path := "[fdf5:9ede:1875:0:a9ee:a600:8fe3:d495]:8086" - u, err := client.ParseConnectionString(path, false) - if err != nil { - t.Fatalf("unexpected error, expected %v, actual %v", nil, err) - } - if u.Host != path { - t.Fatalf("ipv6 parse failed, expected %s, actual %s", path, u.Host) - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/v2/client.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/v2/client.go deleted file mode 100644 index 29f0a458a..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/v2/client.go +++ /dev/null @@ -1,498 +0,0 @@ -package client - -import ( - "bytes" - "crypto/tls" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "time" - - "github.com/influxdb/influxdb/models" -) - -// UDPPayloadSize is a reasonable default payload size for UDP packets that -// could be travelling over the internet. -const ( - UDPPayloadSize = 512 -) - -type HTTPConfig struct { - // Addr should be of the form "http://host:port" - // or "http://[ipv6-host%zone]:port". 
- Addr string - - // Username is the influxdb username, optional - Username string - - // Password is the influxdb password, optional - Password string - - // UserAgent is the http User Agent, defaults to "InfluxDBClient" - UserAgent string - - // Timeout for influxdb writes, defaults to no timeout - Timeout time.Duration - - // InsecureSkipVerify gets passed to the http client, if true, it will - // skip https certificate verification. Defaults to false - InsecureSkipVerify bool -} - -type UDPConfig struct { - // Addr should be of the form "udp://host:port" - // or "udp://[ipv6-host%zone]:port". - Addr string - - // PayloadSize is the maximum size of a UDP client message, optional - // Tune this based on your network. Defaults to UDPBufferSize. - PayloadSize int -} - -type BatchPointsConfig struct { - // Precision is the write precision of the points, defaults to "ns" - Precision string - - // Database is the database to write points to - Database string - - // RetentionPolicy is the retention policy of the points - RetentionPolicy string - - // Write consistency is the number of servers required to confirm write - WriteConsistency string -} - -// Client is a client interface for writing & querying the database -type Client interface { - // Write takes a BatchPoints object and writes all Points to InfluxDB. - Write(bp BatchPoints) error - - // Query makes an InfluxDB Query on the database. This will fail if using - // the UDP client. - Query(q Query) (*Response, error) - - // Close releases any resources a Client may be using. - Close() error -} - -// NewClient creates a client interface from the given config. -func NewHTTPClient(conf HTTPConfig) (Client, error) { - if conf.UserAgent == "" { - conf.UserAgent = "InfluxDBClient" - } - - u, err := url.Parse(conf.Addr) - if err != nil { - return nil, err - } else if u.Scheme != "http" && u.Scheme != "https" { - m := fmt.Sprintf("Unsupported protocol scheme: %s, your address"+ - " must start with http:// or https://", u.Scheme) - return nil, errors.New(m) - } - - tr := &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: conf.InsecureSkipVerify, - }, - } - return &client{ - url: u, - username: conf.Username, - password: conf.Password, - useragent: conf.UserAgent, - httpClient: &http.Client{ - Timeout: conf.Timeout, - Transport: tr, - }, - }, nil -} - -// Close releases the client's resources. -func (c *client) Close() error { - return nil -} - -// NewUDPClient returns a client interface for writing to an InfluxDB UDP -// service from the given config. -func NewUDPClient(conf UDPConfig) (Client, error) { - var udpAddr *net.UDPAddr - udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr) - if err != nil { - return nil, err - } - - conn, err := net.DialUDP("udp", nil, udpAddr) - if err != nil { - return nil, err - } - - payloadSize := conf.PayloadSize - if payloadSize == 0 { - payloadSize = UDPPayloadSize - } - - return &udpclient{ - conn: conn, - payloadSize: payloadSize, - }, nil -} - -// Close releases the udpclient's resources. -func (uc *udpclient) Close() error { - return uc.conn.Close() -} - -type client struct { - url *url.URL - username string - password string - useragent string - httpClient *http.Client -} - -type udpclient struct { - conn *net.UDPConn - payloadSize int -} - -// BatchPoints is an interface into a batched grouping of points to write into -// InfluxDB together. BatchPoints is NOT thread-safe, you must create a separate -// batch for each goroutine. 
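`PayloadSize` is the one knob worth tuning on the UDP path, since the writer flushes whenever a batch would exceed it. A sketch — 1450 bytes is an assumed conservative fit for a 1500-byte MTU, not a recommendation from this package:

```go
c, err := client.NewUDPClient(client.UDPConfig{
	Addr:        "localhost:8089",
	PayloadSize: 1450, // assumption: leave headroom under a 1500-byte MTU
})
if err != nil {
	log.Fatal(err)
}
defer c.Close()
```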
-type BatchPoints interface { - // AddPoint adds the given point to the Batch of points - AddPoint(p *Point) - // Points lists the points in the Batch - Points() []*Point - - // Precision returns the currently set precision of this Batch - Precision() string - // SetPrecision sets the precision of this batch. - SetPrecision(s string) error - - // Database returns the currently set database of this Batch - Database() string - // SetDatabase sets the database of this Batch - SetDatabase(s string) - - // WriteConsistency returns the currently set write consistency of this Batch - WriteConsistency() string - // SetWriteConsistency sets the write consistency of this Batch - SetWriteConsistency(s string) - - // RetentionPolicy returns the currently set retention policy of this Batch - RetentionPolicy() string - // SetRetentionPolicy sets the retention policy of this Batch - SetRetentionPolicy(s string) -} - -// NewBatchPoints returns a BatchPoints interface based on the given config. -func NewBatchPoints(conf BatchPointsConfig) (BatchPoints, error) { - if conf.Precision == "" { - conf.Precision = "ns" - } - if _, err := time.ParseDuration("1" + conf.Precision); err != nil { - return nil, err - } - bp := &batchpoints{ - database: conf.Database, - precision: conf.Precision, - retentionPolicy: conf.RetentionPolicy, - writeConsistency: conf.WriteConsistency, - } - return bp, nil -} - -type batchpoints struct { - points []*Point - database string - precision string - retentionPolicy string - writeConsistency string -} - -func (bp *batchpoints) AddPoint(p *Point) { - bp.points = append(bp.points, p) -} - -func (bp *batchpoints) Points() []*Point { - return bp.points -} - -func (bp *batchpoints) Precision() string { - return bp.precision -} - -func (bp *batchpoints) Database() string { - return bp.database -} - -func (bp *batchpoints) WriteConsistency() string { - return bp.writeConsistency -} - -func (bp *batchpoints) RetentionPolicy() string { - return bp.retentionPolicy -} - -func (bp *batchpoints) SetPrecision(p string) error { - if _, err := time.ParseDuration("1" + p); err != nil { - return err - } - bp.precision = p - return nil -} - -func (bp *batchpoints) SetDatabase(db string) { - bp.database = db -} - -func (bp *batchpoints) SetWriteConsistency(wc string) { - bp.writeConsistency = wc -} - -func (bp *batchpoints) SetRetentionPolicy(rp string) { - bp.retentionPolicy = rp -} - -type Point struct { - pt models.Point -} - -// NewPoint returns a point with the given timestamp. If a timestamp is not -// given, then data is sent to the database without a timestamp, in which case -// the server will assign local time upon reception. NOTE: it is recommended -// to send data with a timestamp. 
-func NewPoint( - name string, - tags map[string]string, - fields map[string]interface{}, - t ...time.Time, -) (*Point, error) { - var T time.Time - if len(t) > 0 { - T = t[0] - } - - pt, err := models.NewPoint(name, tags, fields, T) - if err != nil { - return nil, err - } - return &Point{ - pt: pt, - }, nil -} - -// String returns a line-protocol string of the Point -func (p *Point) String() string { - return p.pt.String() -} - -// PrecisionString returns a line-protocol string of the Point, at precision -func (p *Point) PrecisionString(precison string) string { - return p.pt.PrecisionString(precison) -} - -// Name returns the measurement name of the point -func (p *Point) Name() string { - return p.pt.Name() -} - -// Name returns the tags associated with the point -func (p *Point) Tags() map[string]string { - return p.pt.Tags() -} - -// Time return the timestamp for the point -func (p *Point) Time() time.Time { - return p.pt.Time() -} - -// UnixNano returns the unix nano time of the point -func (p *Point) UnixNano() int64 { - return p.pt.UnixNano() -} - -// Fields returns the fields for the point -func (p *Point) Fields() map[string]interface{} { - return p.pt.Fields() -} - -func (uc *udpclient) Write(bp BatchPoints) error { - var b bytes.Buffer - var d time.Duration - d, _ = time.ParseDuration("1" + bp.Precision()) - - for _, p := range bp.Points() { - pointstring := p.pt.RoundedString(d) + "\n" - - // Write and reset the buffer if we reach the max size - if b.Len()+len(pointstring) >= uc.payloadSize { - if _, err := uc.conn.Write(b.Bytes()); err != nil { - return err - } - b.Reset() - } - - if _, err := b.WriteString(pointstring); err != nil { - return err - } - } - - _, err := uc.conn.Write(b.Bytes()) - return err -} - -func (c *client) Write(bp BatchPoints) error { - var b bytes.Buffer - - for _, p := range bp.Points() { - if _, err := b.WriteString(p.pt.PrecisionString(bp.Precision())); err != nil { - return err - } - - if err := b.WriteByte('\n'); err != nil { - return err - } - } - - u := c.url - u.Path = "write" - req, err := http.NewRequest("POST", u.String(), &b) - if err != nil { - return err - } - req.Header.Set("Content-Type", "") - req.Header.Set("User-Agent", c.useragent) - if c.username != "" { - req.SetBasicAuth(c.username, c.password) - } - - params := req.URL.Query() - params.Set("db", bp.Database()) - params.Set("rp", bp.RetentionPolicy()) - params.Set("precision", bp.Precision()) - params.Set("consistency", bp.WriteConsistency()) - req.URL.RawQuery = params.Encode() - - resp, err := c.httpClient.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { - var err = fmt.Errorf(string(body)) - return err - } - - return nil -} - -// Query defines a query to send to the server -type Query struct { - Command string - Database string - Precision string -} - -// NewQuery returns a query object -// database and precision strings can be empty strings if they are not needed -// for the query. -func NewQuery(command, database, precision string) Query { - return Query{ - Command: command, - Database: database, - Precision: precision, - } -} - -// Response represents a list of statement results. -type Response struct { - Results []Result - Err error -} - -// Error returns the first error from any statement. -// Returns nil if no errors occurred on any statements. 
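The two error channels (transport-level vs. per-statement) are easy to conflate. A usage sketch, where `c` is any v2 `Client` and the query and database names are invented:

```go
q := client.NewQuery("SELECT count(value) FROM shapes", "square_holes", "")
response, err := c.Query(q)
if err != nil {
	log.Fatal(err) // transport-level failure (network error, bad status)
}
if response.Error() != nil {
	log.Fatal(response.Error()) // first error from any statement
}
fmt.Println(response.Results)
```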
-func (r *Response) Error() error { - if r.Err != nil { - return r.Err - } - for _, result := range r.Results { - if result.Err != nil { - return result.Err - } - } - return nil -} - -// Result represents a resultset returned from a single statement. -type Result struct { - Series []models.Row - Err error -} - -func (uc *udpclient) Query(q Query) (*Response, error) { - return nil, fmt.Errorf("Querying via UDP is not supported") -} - -// Query sends a command to the server and returns the Response -func (c *client) Query(q Query) (*Response, error) { - u := c.url - u.Path = "query" - - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "") - req.Header.Set("User-Agent", c.useragent) - if c.username != "" { - req.SetBasicAuth(c.username, c.password) - } - - params := req.URL.Query() - params.Set("q", q.Command) - params.Set("db", q.Database) - if q.Precision != "" { - params.Set("epoch", q.Precision) - } - req.URL.RawQuery = params.Encode() - - resp, err := c.httpClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var response Response - dec := json.NewDecoder(resp.Body) - dec.UseNumber() - decErr := dec.Decode(&response) - - // ignore this error if we got an invalid status code - if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK { - decErr = nil - } - // If we got a valid decode error, send that back - if decErr != nil { - return nil, decErr - } - // If we don't have an error in our json response, and didn't get statusOK - // then send back an error - if resp.StatusCode != http.StatusOK && response.Error() == nil { - return &response, fmt.Errorf("received status code %d from server", - resp.StatusCode) - } - return &response, nil -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/v2/client_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/v2/client_test.go deleted file mode 100644 index 5f1e908a1..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/v2/client_test.go +++ /dev/null @@ -1,337 +0,0 @@ -package client - -import ( - "encoding/json" - "net/http" - "net/http/httptest" - "reflect" - "strings" - "testing" - "time" -) - -func TestUDPClient_Query(t *testing.T) { - config := UDPConfig{Addr: "localhost:8089"} - c, err := NewUDPClient(config) - if err != nil { - t.Errorf("unexpected error. expected %v, actual %v", nil, err) - } - defer c.Close() - query := Query{} - _, err = c.Query(query) - if err == nil { - t.Error("Querying UDP client should fail") - } -} - -func TestUDPClient_Write(t *testing.T) { - config := UDPConfig{Addr: "localhost:8089"} - c, err := NewUDPClient(config) - if err != nil { - t.Errorf("unexpected error. expected %v, actual %v", nil, err) - } - defer c.Close() - - bp, err := NewBatchPoints(BatchPointsConfig{}) - if err != nil { - t.Errorf("unexpected error. expected %v, actual %v", nil, err) - } - - fields := make(map[string]interface{}) - fields["value"] = 1.0 - pt, _ := NewPoint("cpu", make(map[string]string), fields) - bp.AddPoint(pt) - - err = c.Write(bp) - if err != nil { - t.Errorf("unexpected error. 
expected %v, actual %v", nil, err) - } -} - -func TestUDPClient_BadAddr(t *testing.T) { - config := UDPConfig{Addr: "foobar@wahoo"} - c, err := NewUDPClient(config) - if err == nil { - defer c.Close() - t.Error("Expected resolve error") - } -} - -func TestClient_Query(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var data Response - w.WriteHeader(http.StatusOK) - _ = json.NewEncoder(w).Encode(data) - })) - defer ts.Close() - - config := HTTPConfig{Addr: ts.URL} - c, _ := NewHTTPClient(config) - defer c.Close() - - query := Query{} - _, err := c.Query(query) - if err != nil { - t.Errorf("unexpected error. expected %v, actual %v", nil, err) - } -} - -func TestClient_BasicAuth(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - u, p, ok := r.BasicAuth() - - if !ok { - t.Errorf("basic auth error") - } - if u != "username" { - t.Errorf("unexpected username, expected %q, actual %q", "username", u) - } - if p != "password" { - t.Errorf("unexpected password, expected %q, actual %q", "password", p) - } - var data Response - w.WriteHeader(http.StatusOK) - _ = json.NewEncoder(w).Encode(data) - })) - defer ts.Close() - - config := HTTPConfig{Addr: ts.URL, Username: "username", Password: "password"} - c, _ := NewHTTPClient(config) - defer c.Close() - - query := Query{} - _, err := c.Query(query) - if err != nil { - t.Errorf("unexpected error. expected %v, actual %v", nil, err) - } -} - -func TestClient_Write(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var data Response - w.WriteHeader(http.StatusNoContent) - _ = json.NewEncoder(w).Encode(data) - })) - defer ts.Close() - - config := HTTPConfig{Addr: ts.URL} - c, _ := NewHTTPClient(config) - defer c.Close() - - bp, err := NewBatchPoints(BatchPointsConfig{}) - if err != nil { - t.Errorf("unexpected error. expected %v, actual %v", nil, err) - } - err = c.Write(bp) - if err != nil { - t.Errorf("unexpected error. expected %v, actual %v", nil, err) - } -} - -func TestClient_UserAgent(t *testing.T) { - receivedUserAgent := "" - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - receivedUserAgent = r.UserAgent() - - var data Response - w.WriteHeader(http.StatusOK) - _ = json.NewEncoder(w).Encode(data) - })) - defer ts.Close() - - _, err := http.Get(ts.URL) - if err != nil { - t.Errorf("unexpected error. expected %v, actual %v", nil, err) - } - - tests := []struct { - name string - userAgent string - expected string - }{ - { - name: "Empty user agent", - userAgent: "", - expected: "InfluxDBClient", - }, - { - name: "Custom user agent", - userAgent: "Test Influx Client", - expected: "Test Influx Client", - }, - } - - for _, test := range tests { - - config := HTTPConfig{Addr: ts.URL, UserAgent: test.userAgent} - c, _ := NewHTTPClient(config) - defer c.Close() - - receivedUserAgent = "" - query := Query{} - _, err = c.Query(query) - if err != nil { - t.Errorf("unexpected error. expected %v, actual %v", nil, err) - } - if !strings.HasPrefix(receivedUserAgent, test.expected) { - t.Errorf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) - } - - receivedUserAgent = "" - bp, _ := NewBatchPoints(BatchPointsConfig{}) - err = c.Write(bp) - if err != nil { - t.Errorf("unexpected error. expected %v, actual %v", nil, err) - } - if !strings.HasPrefix(receivedUserAgent, test.expected) { - t.Errorf("Unexpected user agent. 
expected %v, actual %v", test.expected, receivedUserAgent) - } - - receivedUserAgent = "" - _, err := c.Query(query) - if err != nil { - t.Errorf("unexpected error. expected %v, actual %v", nil, err) - } - if receivedUserAgent != test.expected { - t.Errorf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) - } - } -} - -func TestClient_PointString(t *testing.T) { - const shortForm = "2006-Jan-02" - time1, _ := time.Parse(shortForm, "2013-Feb-03") - tags := map[string]string{"cpu": "cpu-total"} - fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} - p, _ := NewPoint("cpu_usage", tags, fields, time1) - - s := "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39 1359849600000000000" - if p.String() != s { - t.Errorf("Point String Error, got %s, expected %s", p.String(), s) - } - - s = "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39 1359849600000" - if p.PrecisionString("ms") != s { - t.Errorf("Point String Error, got %s, expected %s", - p.PrecisionString("ms"), s) - } -} - -func TestClient_PointWithoutTimeString(t *testing.T) { - tags := map[string]string{"cpu": "cpu-total"} - fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} - p, _ := NewPoint("cpu_usage", tags, fields) - - s := "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39" - if p.String() != s { - t.Errorf("Point String Error, got %s, expected %s", p.String(), s) - } - - if p.PrecisionString("ms") != s { - t.Errorf("Point String Error, got %s, expected %s", - p.PrecisionString("ms"), s) - } -} - -func TestClient_PointName(t *testing.T) { - tags := map[string]string{"cpu": "cpu-total"} - fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} - p, _ := NewPoint("cpu_usage", tags, fields) - - exp := "cpu_usage" - if p.Name() != exp { - t.Errorf("Error, got %s, expected %s", - p.Name(), exp) - } -} - -func TestClient_PointTags(t *testing.T) { - tags := map[string]string{"cpu": "cpu-total"} - fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} - p, _ := NewPoint("cpu_usage", tags, fields) - - if !reflect.DeepEqual(tags, p.Tags()) { - t.Errorf("Error, got %v, expected %v", - p.Tags(), tags) - } -} - -func TestClient_PointUnixNano(t *testing.T) { - const shortForm = "2006-Jan-02" - time1, _ := time.Parse(shortForm, "2013-Feb-03") - tags := map[string]string{"cpu": "cpu-total"} - fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} - p, _ := NewPoint("cpu_usage", tags, fields, time1) - - exp := int64(1359849600000000000) - if p.UnixNano() != exp { - t.Errorf("Error, got %d, expected %d", - p.UnixNano(), exp) - } -} - -func TestClient_PointFields(t *testing.T) { - tags := map[string]string{"cpu": "cpu-total"} - fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} - p, _ := NewPoint("cpu_usage", tags, fields) - - if !reflect.DeepEqual(fields, p.Fields()) { - t.Errorf("Error, got %v, expected %v", - p.Fields(), fields) - } -} - -func TestBatchPoints_PrecisionError(t *testing.T) { - _, err := NewBatchPoints(BatchPointsConfig{Precision: "foobar"}) - if err == nil { - t.Errorf("Precision: foobar should have errored") - } - - bp, _ := NewBatchPoints(BatchPointsConfig{Precision: "ns"}) - err = bp.SetPrecision("foobar") - if err == nil { - t.Errorf("Precision: foobar should have errored") - } -} - -func TestBatchPoints_SettersGetters(t *testing.T) { - bp, _ := NewBatchPoints(BatchPointsConfig{ - Precision: "ns", - Database: "db", - RetentionPolicy: "rp", - 
WriteConsistency: "wc", - }) - if bp.Precision() != "ns" { - t.Errorf("Expected: %s, got %s", bp.Precision(), "ns") - } - if bp.Database() != "db" { - t.Errorf("Expected: %s, got %s", bp.Database(), "db") - } - if bp.RetentionPolicy() != "rp" { - t.Errorf("Expected: %s, got %s", bp.RetentionPolicy(), "rp") - } - if bp.WriteConsistency() != "wc" { - t.Errorf("Expected: %s, got %s", bp.WriteConsistency(), "wc") - } - - bp.SetDatabase("db2") - bp.SetRetentionPolicy("rp2") - bp.SetWriteConsistency("wc2") - err := bp.SetPrecision("s") - if err != nil { - t.Errorf("Did not expect error: %s", err.Error()) - } - - if bp.Precision() != "s" { - t.Errorf("Expected: %s, got %s", bp.Precision(), "s") - } - if bp.Database() != "db2" { - t.Errorf("Expected: %s, got %s", bp.Database(), "db2") - } - if bp.RetentionPolicy() != "rp2" { - t.Errorf("Expected: %s, got %s", bp.RetentionPolicy(), "rp2") - } - if bp.WriteConsistency() != "wc2" { - t.Errorf("Expected: %s, got %s", bp.WriteConsistency(), "wc2") - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/v2/example_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/client/v2/example_test.go deleted file mode 100644 index ae899d8ee..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/client/v2/example_test.go +++ /dev/null @@ -1,248 +0,0 @@ -package client_test - -import ( - "fmt" - "math/rand" - "os" - "time" - - "github.com/influxdb/influxdb/client/v2" -) - -// Create a new client -func ExampleClient() { - // NOTE: this assumes you've setup a user and have setup shell env variables, - // namely INFLUX_USER/INFLUX_PWD. If not just omit Username/Password below. - _, err := client.NewHTTPClient(client.HTTPConfig{ - Addr: "http://localhost:8086", - Username: os.Getenv("INFLUX_USER"), - Password: os.Getenv("INFLUX_PWD"), - }) - if err != nil { - fmt.Println("Error creating InfluxDB Client: ", err.Error()) - } -} - -// Write a point using the UDP client -func ExampleClient_uDP() { - // Make client - config := client.UDPConfig{Addr: "localhost:8089"} - c, err := client.NewUDPClient(config) - if err != nil { - fmt.Println("Error: ", err.Error()) - } - defer c.Close() - - // Create a new point batch - bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ - Precision: "s", - }) - - // Create a point and add to batch - tags := map[string]string{"cpu": "cpu-total"} - fields := map[string]interface{}{ - "idle": 10.1, - "system": 53.3, - "user": 46.6, - } - pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) - if err != nil { - fmt.Println("Error: ", err.Error()) - } - bp.AddPoint(pt) - - // Write the batch - c.Write(bp) -} - -// Write a point using the HTTP client -func ExampleClient_write() { - // Make client - c, err := client.NewHTTPClient(client.HTTPConfig{ - Addr: "http://localhost:8086", - }) - if err != nil { - fmt.Println("Error creating InfluxDB Client: ", err.Error()) - } - defer c.Close() - - // Create a new point batch - bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ - Database: "BumbleBeeTuna", - Precision: "s", - }) - - // Create a point and add to batch - tags := map[string]string{"cpu": "cpu-total"} - fields := map[string]interface{}{ - "idle": 10.1, - "system": 53.3, - "user": 46.6, - } - pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) - if err != nil { - fmt.Println("Error: ", err.Error()) - } - bp.AddPoint(pt) - - // Write the batch - c.Write(bp) -} - -// Create a batch and add a point -func ExampleBatchPoints() { - // Create a new point batch - bp, _ := 
client.NewBatchPoints(client.BatchPointsConfig{ - Database: "BumbleBeeTuna", - Precision: "s", - }) - - // Create a point and add to batch - tags := map[string]string{"cpu": "cpu-total"} - fields := map[string]interface{}{ - "idle": 10.1, - "system": 53.3, - "user": 46.6, - } - pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) - if err != nil { - fmt.Println("Error: ", err.Error()) - } - bp.AddPoint(pt) -} - -// Using the BatchPoints setter functions -func ExampleBatchPoints_setters() { - // Create a new point batch - bp, _ := client.NewBatchPoints(client.BatchPointsConfig{}) - bp.SetDatabase("BumbleBeeTuna") - bp.SetPrecision("ms") - - // Create a point and add to batch - tags := map[string]string{"cpu": "cpu-total"} - fields := map[string]interface{}{ - "idle": 10.1, - "system": 53.3, - "user": 46.6, - } - pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) - if err != nil { - fmt.Println("Error: ", err.Error()) - } - bp.AddPoint(pt) -} - -// Create a new point with a timestamp -func ExamplePoint() { - tags := map[string]string{"cpu": "cpu-total"} - fields := map[string]interface{}{ - "idle": 10.1, - "system": 53.3, - "user": 46.6, - } - pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) - if err == nil { - fmt.Println("We created a point: ", pt.String()) - } -} - -// Create a new point without a timestamp -func ExamplePoint_withoutTime() { - tags := map[string]string{"cpu": "cpu-total"} - fields := map[string]interface{}{ - "idle": 10.1, - "system": 53.3, - "user": 46.6, - } - pt, err := client.NewPoint("cpu_usage", tags, fields) - if err == nil { - fmt.Println("We created a point w/o time: ", pt.String()) - } -} - -// Write 1000 points -func ExampleClient_write1000() { - sampleSize := 1000 - - // Make client - c, err := client.NewHTTPClient(client.HTTPConfig{ - Addr: "http://localhost:8086", - }) - if err != nil { - fmt.Println("Error creating InfluxDB Client: ", err.Error()) - } - defer c.Close() - - rand.Seed(42) - - bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ - Database: "systemstats", - Precision: "us", - }) - - for i := 0; i < sampleSize; i++ { - regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"} - tags := map[string]string{ - "cpu": "cpu-total", - "host": fmt.Sprintf("host%d", rand.Intn(1000)), - "region": regions[rand.Intn(len(regions))], - } - - idle := rand.Float64() * 100.0 - fields := map[string]interface{}{ - "idle": idle, - "busy": 100.0 - idle, - } - - pt, err := client.NewPoint( - "cpu_usage", - tags, - fields, - time.Now(), - ) - if err != nil { - println("Error:", err.Error()) - continue - } - bp.AddPoint(pt) - } - - err = c.Write(bp) - if err != nil { - fmt.Println("Error: ", err.Error()) - } -} - -// Make a Query -func ExampleClient_query() { - // Make client - c, err := client.NewHTTPClient(client.HTTPConfig{ - Addr: "http://localhost:8086", - }) - if err != nil { - fmt.Println("Error creating InfluxDB Client: ", err.Error()) - } - defer c.Close() - - q := client.NewQuery("SELECT count(value) FROM shapes", "square_holes", "ns") - if response, err := c.Query(q); err == nil && response.Error() == nil { - fmt.Println(response.Results) - } -} - -// Create a Database with a query -func ExampleClient_createDatabase() { - // Make client - c, err := client.NewHTTPClient(client.HTTPConfig{ - Addr: "http://localhost:8086", - }) - if err != nil { - fmt.Println("Error creating InfluxDB Client: ", err.Error()) - } - defer c.Close() - - q := client.NewQuery("CREATE DATABASE telegraf", "", "") - if 
response, err := c.Query(q); err == nil && response.Error() == nil { - fmt.Println(response.Results) - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/balancer.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/balancer.go deleted file mode 100644 index cf7efddd2..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/balancer.go +++ /dev/null @@ -1,78 +0,0 @@ -package cluster - -import ( - "math/rand" - - "github.com/influxdb/influxdb/meta" -) - -// Balancer represents a load-balancing algorithm for a set of nodes -type Balancer interface { - // Next returns the next Node according to the balancing method - // or nil if there are no nodes available - Next() *meta.NodeInfo -} - -type nodeBalancer struct { - nodes []meta.NodeInfo // data nodes to balance between - p int // current node index -} - -// NewNodeBalancer creates a shuffled, round-robin balancer so that -// multiple instances will return nodes in randomized order and -// each returned node will be repeated in a cycle -func NewNodeBalancer(nodes []meta.NodeInfo) Balancer { - // make a copy of the node slice so we can randomize it - // without affecting the original instance as well as ensure - // that each Balancer returns nodes in a different order - b := &nodeBalancer{} - - b.nodes = make([]meta.NodeInfo, len(nodes)) - copy(b.nodes, nodes) - - b.shuffle() - return b -} - -// shuffle randomizes the ordering of the balancer's available nodes -func (b *nodeBalancer) shuffle() { - for i := range b.nodes { - j := rand.Intn(i + 1) - b.nodes[i], b.nodes[j] = b.nodes[j], b.nodes[i] - } -} - -// online returns a slice of the nodes that are online -func (b *nodeBalancer) online() []meta.NodeInfo { - return b.nodes - // now := time.Now().UTC() - // up := []meta.NodeInfo{} - // for _, n := range b.nodes { - // if n.OfflineUntil.After(now) { - // continue - // } - // up = append(up, n) - // } - // return up -} - -// Next returns the next available node -func (b *nodeBalancer) Next() *meta.NodeInfo { - // only use online nodes - up := b.online() - - // no nodes online - if len(up) == 0 { - return nil - } - - // rollover back to the beginning - if b.p >= len(up) { - b.p = 0 - } - - d := &up[b.p] - b.p++ - - return d -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/balancer_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/balancer_test.go deleted file mode 100644 index dd4a834cb..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/balancer_test.go +++ /dev/null @@ -1,115 +0,0 @@ -package cluster_test - -import ( - "fmt" - "testing" - - "github.com/influxdb/influxdb/cluster" - "github.com/influxdb/influxdb/meta" -) - -func NewNodes() []meta.NodeInfo { - var nodes []meta.NodeInfo - for i := 1; i <= 2; i++ { - nodes = append(nodes, meta.NodeInfo{ - ID: uint64(i), - Host: fmt.Sprintf("localhost:999%d", i), - }) - } - return nodes -} - -func TestBalancerEmptyNodes(t *testing.T) { - b := cluster.NewNodeBalancer([]meta.NodeInfo{}) - got := b.Next() - if got != nil { - t.Errorf("expected nil, got %v", got) - } -} - -func TestBalancerUp(t *testing.T) { - nodes := NewNodes() - b := cluster.NewNodeBalancer(nodes) - - // First node in randomized round-robin order - first := b.Next() - if first == nil { - t.Errorf("expected datanode, got %v", first) - } - - // Second node in randomized round-robin order - second := b.Next() - if second == nil { - t.Errorf("expected datanode, got %v", second) - } - - // Should never get the same 
node in order twice - if first.ID == second.ID { - t.Errorf("expected first != second. got %v = %v", first.ID, second.ID) - } -} - -/* -func TestBalancerDown(t *testing.T) { - nodes := NewNodes() - b := cluster.NewNodeBalancer(nodes) - - nodes[0].Down() - - // First node in randomized round-robin order - first := b.Next() - if first == nil { - t.Errorf("expected datanode, got %v", first) - } - - // Second node should rollover to the first up node - second := b.Next() - if second == nil { - t.Errorf("expected datanode, got %v", second) - } - - // Health node should be returned each time - if first.ID != 2 && first.ID != second.ID { - t.Errorf("expected first != second. got %v = %v", first.ID, second.ID) - } -} -*/ - -/* -func TestBalancerBackUp(t *testing.T) { - nodes := newDataNodes() - b := cluster.NewNodeBalancer(nodes) - - nodes[0].Down() - - for i := 0; i < 3; i++ { - got := b.Next() - if got == nil { - t.Errorf("expected datanode, got %v", got) - } - - if exp := uint64(2); got.ID != exp { - t.Errorf("wrong node id: exp %v, got %v", exp, got.ID) - } - } - - nodes[0].Up() - - // First node in randomized round-robin order - first := b.Next() - if first == nil { - t.Errorf("expected datanode, got %v", first) - } - - // Second node should rollover to the first up node - second := b.Next() - if second == nil { - t.Errorf("expected datanode, got %v", second) - } - - // Should get both nodes returned - if first.ID == second.ID { - t.Errorf("expected first != second. got %v = %v", first.ID, second.ID) - } -} -*/ diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/client_pool.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/client_pool.go deleted file mode 100644 index fed7e18e0..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/client_pool.go +++ /dev/null @@ -1,57 +0,0 @@ -package cluster - -import ( - "net" - "sync" - - "gopkg.in/fatih/pool.v2" -) - -type clientPool struct { - mu sync.RWMutex - pool map[uint64]pool.Pool -} - -func newClientPool() *clientPool { - return &clientPool{ - pool: make(map[uint64]pool.Pool), - } -} - -func (c *clientPool) setPool(nodeID uint64, p pool.Pool) { - c.mu.Lock() - c.pool[nodeID] = p - c.mu.Unlock() -} - -func (c *clientPool) getPool(nodeID uint64) (pool.Pool, bool) { - c.mu.RLock() - p, ok := c.pool[nodeID] - c.mu.RUnlock() - return p, ok -} - -func (c *clientPool) size() int { - c.mu.RLock() - var size int - for _, p := range c.pool { - size += p.Len() - } - c.mu.RUnlock() - return size -} - -func (c *clientPool) conn(nodeID uint64) (net.Conn, error) { - c.mu.RLock() - conn, err := c.pool[nodeID].Get() - c.mu.RUnlock() - return conn, err -} - -func (c *clientPool) close() { - c.mu.Lock() - for _, p := range c.pool { - p.Close() - } - c.mu.Unlock() -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/config.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/config.go deleted file mode 100644 index 3a67b32d0..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/config.go +++ /dev/null @@ -1,35 +0,0 @@ -package cluster - -import ( - "time" - - "github.com/influxdb/influxdb/toml" -) - -const ( - // DefaultWriteTimeout is the default timeout for a complete write to succeed. - DefaultWriteTimeout = 5 * time.Second - - // DefaultShardWriterTimeout is the default timeout set on shard writers. - DefaultShardWriterTimeout = 5 * time.Second - - // DefaultShardMapperTimeout is the default timeout set on shard mappers. 
- DefaultShardMapperTimeout = 5 * time.Second -) - -// Config represents the configuration for the clustering service. -type Config struct { - ForceRemoteShardMapping bool `toml:"force-remote-mapping"` - WriteTimeout toml.Duration `toml:"write-timeout"` - ShardWriterTimeout toml.Duration `toml:"shard-writer-timeout"` - ShardMapperTimeout toml.Duration `toml:"shard-mapper-timeout"` -} - -// NewConfig returns an instance of Config with defaults. -func NewConfig() Config { - return Config{ - WriteTimeout: toml.Duration(DefaultWriteTimeout), - ShardWriterTimeout: toml.Duration(DefaultShardWriterTimeout), - ShardMapperTimeout: toml.Duration(DefaultShardMapperTimeout), - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/config_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/config_test.go deleted file mode 100644 index db5e5ddc1..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/config_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package cluster_test - -import ( - "testing" - "time" - - "github.com/BurntSushi/toml" - "github.com/influxdb/influxdb/cluster" -) - -func TestConfig_Parse(t *testing.T) { - // Parse configuration. - var c cluster.Config - if _, err := toml.Decode(` -shard-writer-timeout = "10s" -write-timeout = "20s" -`, &c); err != nil { - t.Fatal(err) - } - - // Validate configuration. - if time.Duration(c.ShardWriterTimeout) != 10*time.Second { - t.Fatalf("unexpected shard-writer timeout: %s", c.ShardWriterTimeout) - } else if time.Duration(c.WriteTimeout) != 20*time.Second { - t.Fatalf("unexpected write timeout: %s", c.WriteTimeout) - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.pb.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.pb.go deleted file mode 100644 index f95463903..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.pb.go +++ /dev/null @@ -1,154 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: internal/data.proto -// DO NOT EDIT! - -/* -Package internal is a generated protocol buffer package. - -It is generated from these files: - internal/data.proto - -It has these top-level messages: - WriteShardRequest - WriteShardResponse - MapShardRequest - MapShardResponse -*/ -package internal - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type WriteShardRequest struct { - ShardID *uint64 `protobuf:"varint,1,req,name=ShardID" json:"ShardID,omitempty"` - Points [][]byte `protobuf:"bytes,2,rep,name=Points" json:"Points,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *WriteShardRequest) Reset() { *m = WriteShardRequest{} } -func (m *WriteShardRequest) String() string { return proto.CompactTextString(m) } -func (*WriteShardRequest) ProtoMessage() {} - -func (m *WriteShardRequest) GetShardID() uint64 { - if m != nil && m.ShardID != nil { - return *m.ShardID - } - return 0 -} - -func (m *WriteShardRequest) GetPoints() [][]byte { - if m != nil { - return m.Points - } - return nil -} - -type WriteShardResponse struct { - Code *int32 `protobuf:"varint,1,req,name=Code" json:"Code,omitempty"` - Message *string `protobuf:"bytes,2,opt,name=Message" json:"Message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *WriteShardResponse) Reset() { *m = WriteShardResponse{} } -func (m *WriteShardResponse) String() string { return proto.CompactTextString(m) } -func (*WriteShardResponse) ProtoMessage() {} - -func (m *WriteShardResponse) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -func (m *WriteShardResponse) GetMessage() string { - if m != nil && m.Message != nil { - return *m.Message - } - return "" -} - -type MapShardRequest struct { - ShardID *uint64 `protobuf:"varint,1,req,name=ShardID" json:"ShardID,omitempty"` - Query *string `protobuf:"bytes,2,req,name=Query" json:"Query,omitempty"` - ChunkSize *int32 `protobuf:"varint,3,req,name=ChunkSize" json:"ChunkSize,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MapShardRequest) Reset() { *m = MapShardRequest{} } -func (m *MapShardRequest) String() string { return proto.CompactTextString(m) } -func (*MapShardRequest) ProtoMessage() {} - -func (m *MapShardRequest) GetShardID() uint64 { - if m != nil && m.ShardID != nil { - return *m.ShardID - } - return 0 -} - -func (m *MapShardRequest) GetQuery() string { - if m != nil && m.Query != nil { - return *m.Query - } - return "" -} - -func (m *MapShardRequest) GetChunkSize() int32 { - if m != nil && m.ChunkSize != nil { - return *m.ChunkSize - } - return 0 -} - -type MapShardResponse struct { - Code *int32 `protobuf:"varint,1,req,name=Code" json:"Code,omitempty"` - Message *string `protobuf:"bytes,2,opt,name=Message" json:"Message,omitempty"` - Data []byte `protobuf:"bytes,3,opt,name=Data" json:"Data,omitempty"` - TagSets []string `protobuf:"bytes,4,rep,name=TagSets" json:"TagSets,omitempty"` - Fields []string `protobuf:"bytes,5,rep,name=Fields" json:"Fields,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MapShardResponse) Reset() { *m = MapShardResponse{} } -func (m *MapShardResponse) String() string { return proto.CompactTextString(m) } -func (*MapShardResponse) ProtoMessage() {} - -func (m *MapShardResponse) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -func (m *MapShardResponse) GetMessage() string { - if m != nil && m.Message != nil { - return *m.Message - } - return "" -} - -func (m *MapShardResponse) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *MapShardResponse) GetTagSets() []string { - if m != nil { - return m.TagSets - } - return nil -} - -func (m *MapShardResponse) GetFields() []string { - if m != nil { - return m.Fields - } - return nil -} diff --git 
a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.proto b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.proto deleted file mode 100644 index fed14bad9..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.proto +++ /dev/null @@ -1,25 +0,0 @@ -package internal; - -message WriteShardRequest { - required uint64 ShardID = 1; - repeated bytes Points = 2; -} - -message WriteShardResponse { - required int32 Code = 1; - optional string Message = 2; -} - -message MapShardRequest { - required uint64 ShardID = 1; - required string Query = 2; - required int32 ChunkSize = 3; -} - -message MapShardResponse { - required int32 Code = 1; - optional string Message = 2; - optional bytes Data = 3; - repeated string TagSets = 4; - repeated string Fields = 5; -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer.go deleted file mode 100644 index 915058902..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer.go +++ /dev/null @@ -1,394 +0,0 @@ -package cluster - -import ( - "errors" - "expvar" - "fmt" - "log" - "os" - "strings" - "sync" - "time" - - "github.com/influxdb/influxdb" - "github.com/influxdb/influxdb/meta" - "github.com/influxdb/influxdb/models" - "github.com/influxdb/influxdb/tsdb" -) - -// ConsistencyLevel represents the replication criteria required before a write can -// be returned as successful -type ConsistencyLevel int - -// The statistics generated by the "write" module -const ( - statWriteReq = "req" - statPointWriteReq = "pointReq" - statPointWriteReqLocal = "pointReqLocal" - statPointWriteReqRemote = "pointReqRemote" - statWriteOK = "writeOk" - statWritePartial = "writePartial" - statWriteTimeout = "writeTimeout" - statWriteErr = "writeError" - statWritePointReqHH = "pointReqHH" - statSubWriteOK = "subWriteOk" - statSubWriteDrop = "subWriteDrop" -) - -const ( - // ConsistencyLevelAny allows for hinted handoff, potentially no write happened yet - ConsistencyLevelAny ConsistencyLevel = iota - - // ConsistencyLevelOne requires at least one data node acknowledged a write - ConsistencyLevelOne - - // ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write - ConsistencyLevelQuorum - - // ConsistencyLevelAll requires all data nodes to acknowledge a write - ConsistencyLevelAll -) - -var ( - // ErrTimeout is returned when a write times out. - ErrTimeout = errors.New("timeout") - - // ErrPartialWrite is returned when a write partially succeeds but does - // not meet the requested consistency level. - ErrPartialWrite = errors.New("partial write") - - // ErrWriteFailed is returned when no writes succeeded. - ErrWriteFailed = errors.New("write failed") - - // ErrInvalidConsistencyLevel is returned when parsing the string version - // of a consistency level. 
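 - // For example, ParseConsistencyLevel("foobar") below falls through to the - // default case and returns this error.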
- ErrInvalidConsistencyLevel = errors.New("invalid consistency level") -) - -// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const -func ParseConsistencyLevel(level string) (ConsistencyLevel, error) { - switch strings.ToLower(level) { - case "any": - return ConsistencyLevelAny, nil - case "one": - return ConsistencyLevelOne, nil - case "quorum": - return ConsistencyLevelQuorum, nil - case "all": - return ConsistencyLevelAll, nil - default: - return 0, ErrInvalidConsistencyLevel - } -} - -// PointsWriter handles writes across multiple local and remote data nodes. -type PointsWriter struct { - mu sync.RWMutex - closing chan struct{} - WriteTimeout time.Duration - Logger *log.Logger - - MetaStore interface { - NodeID() uint64 - Database(name string) (di *meta.DatabaseInfo, err error) - RetentionPolicy(database, policy string) (*meta.RetentionPolicyInfo, error) - CreateShardGroupIfNotExists(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) - ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) - } - - TSDBStore interface { - CreateShard(database, retentionPolicy string, shardID uint64) error - WriteToShard(shardID uint64, points []models.Point) error - } - - ShardWriter interface { - WriteShard(shardID, ownerID uint64, points []models.Point) error - } - - HintedHandoff interface { - WriteShard(shardID, ownerID uint64, points []models.Point) error - } - - Subscriber interface { - Points() chan<- *WritePointsRequest - } - subPoints chan<- *WritePointsRequest - - statMap *expvar.Map -} - -// NewPointsWriter returns a new instance of PointsWriter for a node. -func NewPointsWriter() *PointsWriter { - return &PointsWriter{ - closing: make(chan struct{}), - WriteTimeout: DefaultWriteTimeout, - Logger: log.New(os.Stderr, "[write] ", log.LstdFlags), - statMap: influxdb.NewStatistics("write", "write", nil), - } -} - -// ShardMapping contains a mapping of shards to points. -type ShardMapping struct { - Points map[uint64][]models.Point // The points associated with a shard ID - Shards map[uint64]*meta.ShardInfo // The shards that have been mapped, keyed by shard ID -} - -// NewShardMapping creates an empty ShardMapping -func NewShardMapping() *ShardMapping { - return &ShardMapping{ - Points: map[uint64][]models.Point{}, - Shards: map[uint64]*meta.ShardInfo{}, - } -} - -// MapPoint maps a point to a shard -func (s *ShardMapping) MapPoint(shardInfo *meta.ShardInfo, p models.Point) { - points, ok := s.Points[shardInfo.ID] - if !ok { - s.Points[shardInfo.ID] = []models.Point{p} - } else { - s.Points[shardInfo.ID] = append(points, p) - } - s.Shards[shardInfo.ID] = shardInfo -} - -// Open opens the communication channel with the point writer -func (w *PointsWriter) Open() error { - w.mu.Lock() - defer w.mu.Unlock() - w.closing = make(chan struct{}) - if w.Subscriber != nil { - w.subPoints = w.Subscriber.Points() - } - return nil -} - -// Close closes the communication channel with the point writer -func (w *PointsWriter) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - if w.closing != nil { - close(w.closing) - } - if w.subPoints != nil { - // 'nil' channels always block so this makes the - // select statement in WritePoints hit its default case - // dropping any in-flight writes. - w.subPoints = nil - } - return nil -} - -// MapShards maps the points contained in wp to a ShardMapping. If a point -// maps to a shard group or shard that does not currently exist, it will be -// created before returning the mapping. 
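 - // For example, with a 1h ShardGroupDuration, points stamped 00:10 and 00:50 - // truncate to the same group start time (00:00) and map into the same shard - // group, while a point stamped 01:10 maps into the next group.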
-func (w *PointsWriter) MapShards(wp *WritePointsRequest) (*ShardMapping, error) { - - // holds the start time ranges for required shard groups - timeRanges := map[time.Time]*meta.ShardGroupInfo{} - - rp, err := w.MetaStore.RetentionPolicy(wp.Database, wp.RetentionPolicy) - if err != nil { - return nil, err - } - if rp == nil { - return nil, influxdb.ErrRetentionPolicyNotFound(wp.RetentionPolicy) - } - - for _, p := range wp.Points { - timeRanges[p.Time().Truncate(rp.ShardGroupDuration)] = nil - } - - // holds all the shard groups and shards that are required for writes - for t := range timeRanges { - sg, err := w.MetaStore.CreateShardGroupIfNotExists(wp.Database, wp.RetentionPolicy, t) - if err != nil { - return nil, err - } - timeRanges[t] = sg - } - - mapping := NewShardMapping() - for _, p := range wp.Points { - sg := timeRanges[p.Time().Truncate(rp.ShardGroupDuration)] - sh := sg.ShardFor(p.HashID()) - mapping.MapPoint(&sh, p) - } - return mapping, nil -} - -// WritePointsInto is a copy of WritePoints that uses a tsdb structure instead of -// a cluster structure for information. This is to avoid a circular dependency -func (w *PointsWriter) WritePointsInto(p *tsdb.IntoWriteRequest) error { - req := WritePointsRequest{ - Database: p.Database, - RetentionPolicy: p.RetentionPolicy, - ConsistencyLevel: ConsistencyLevelAny, - Points: p.Points, - } - return w.WritePoints(&req) -} - -// WritePoints writes across multiple local and remote data nodes according to the consistency level. -func (w *PointsWriter) WritePoints(p *WritePointsRequest) error { - w.statMap.Add(statWriteReq, 1) - w.statMap.Add(statPointWriteReq, int64(len(p.Points))) - - if p.RetentionPolicy == "" { - db, err := w.MetaStore.Database(p.Database) - if err != nil { - return err - } else if db == nil { - return influxdb.ErrDatabaseNotFound(p.Database) - } - p.RetentionPolicy = db.DefaultRetentionPolicy - } - - shardMappings, err := w.MapShards(p) - if err != nil { - return err - } - - // Write each shard in its own goroutine and return as soon - // as one fails. - ch := make(chan error, len(shardMappings.Points)) - for shardID, points := range shardMappings.Points { - go func(shard *meta.ShardInfo, database, retentionPolicy string, points []models.Point) { - ch <- w.writeToShard(shard, p.Database, p.RetentionPolicy, p.ConsistencyLevel, points) - }(shardMappings.Shards[shardID], p.Database, p.RetentionPolicy, points) - } - - // Send points to subscriptions if possible. - ok := false - // We need to lock just in case the channel is about to be nil'ed - w.mu.RLock() - select { - case w.subPoints <- p: - ok = true - default: - } - w.mu.RUnlock() - if ok { - w.statMap.Add(statSubWriteOK, 1) - } else { - w.statMap.Add(statSubWriteDrop, 1) - } - - for range shardMappings.Points { - select { - case <-w.closing: - return ErrWriteFailed - case err := <-ch: - if err != nil { - return err - } - } - } - return nil -} - -// writeToShard writes points to a shard and ensures a write consistency level has been met. If the write -// partially succeeds, ErrPartialWrite is returned. 
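 - // For example, with a replication factor of 3: ANY and ONE require a single - // acknowledgment, QUORUM requires 3/2+1 = 2, and ALL requires all 3.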
-func (w *PointsWriter) writeToShard(shard *meta.ShardInfo, database, retentionPolicy string, - consistency ConsistencyLevel, points []models.Point) error { - // The required number of writes to achieve the requested consistency level - required := len(shard.Owners) - switch consistency { - case ConsistencyLevelAny, ConsistencyLevelOne: - required = 1 - case ConsistencyLevelQuorum: - required = required/2 + 1 - } - - // response channel for each shard writer goroutine - type AsyncWriteResult struct { - Owner meta.ShardOwner - Err error - } - ch := make(chan *AsyncWriteResult, len(shard.Owners)) - - for _, owner := range shard.Owners { - go func(shardID uint64, owner meta.ShardOwner, points []models.Point) { - if w.MetaStore.NodeID() == owner.NodeID { - w.statMap.Add(statPointWriteReqLocal, int64(len(points))) - - err := w.TSDBStore.WriteToShard(shardID, points) - // If we've written to a shard that should exist on the current node, but the store has - // not actually created this shard, tell it to create it and retry the write - if err == tsdb.ErrShardNotFound { - err = w.TSDBStore.CreateShard(database, retentionPolicy, shardID) - if err != nil { - ch <- &AsyncWriteResult{owner, err} - return - } - err = w.TSDBStore.WriteToShard(shardID, points) - } - ch <- &AsyncWriteResult{owner, err} - return - } - - w.statMap.Add(statPointWriteReqRemote, int64(len(points))) - err := w.ShardWriter.WriteShard(shardID, owner.NodeID, points) - if err != nil && tsdb.IsRetryable(err) { - // The remote write failed so queue it via hinted handoff - w.statMap.Add(statWritePointReqHH, int64(len(points))) - hherr := w.HintedHandoff.WriteShard(shardID, owner.NodeID, points) - - // If the write consistency level is ANY, then a successful hinted handoff can - // be considered a successful write so send nil to the response channel; - // otherwise, let the original error propagate to the response channel - if hherr == nil && consistency == ConsistencyLevelAny { - ch <- &AsyncWriteResult{owner, nil} - return - } - } - ch <- &AsyncWriteResult{owner, err} - - }(shard.ID, owner, points) - } - - var wrote int - timeout := time.After(w.WriteTimeout) - var writeError error - for range shard.Owners { - select { - case <-w.closing: - return ErrWriteFailed - case <-timeout: - w.statMap.Add(statWriteTimeout, 1) - // return timeout error to caller - return ErrTimeout - case result := <-ch: - // If the write returned an error, continue to the next response - if result.Err != nil { - w.statMap.Add(statWriteErr, 1) - w.Logger.Printf("write failed for shard %d on node %d: %v", shard.ID, result.Owner.NodeID, result.Err) - - // Keep track of the first error we see to return back to the client - if writeError == nil { - writeError = result.Err - } - continue - } - - wrote++ - - // We reached the required consistency level - if wrote >= required { - w.statMap.Add(statWriteOK, 1) - return nil - } - } - } - - if wrote > 0 { - w.statMap.Add(statWritePartial, 1) - return ErrPartialWrite - } - - if writeError != nil { - return fmt.Errorf("write failed: %v", writeError) - } - - return ErrWriteFailed -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer_test.go deleted file mode 100644 index d0f57fbc7..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer_test.go +++ /dev/null @@ -1,493 +0,0 @@ -package cluster_test - -import ( - "fmt" - "sync" - "sync/atomic" - "testing" - "time" - - 
"github.com/influxdb/influxdb/cluster" - "github.com/influxdb/influxdb/meta" - "github.com/influxdb/influxdb/models" -) - -// Ensures the points writer maps a single point to a single shard. -func TestPointsWriter_MapShards_One(t *testing.T) { - ms := MetaStore{} - rp := NewRetentionPolicy("myp", time.Hour, 3) - - ms.NodeIDFn = func() uint64 { return 1 } - ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { - return rp, nil - } - - ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { - return &rp.ShardGroups[0], nil - } - - c := cluster.PointsWriter{MetaStore: ms} - pr := &cluster.WritePointsRequest{ - Database: "mydb", - RetentionPolicy: "myrp", - ConsistencyLevel: cluster.ConsistencyLevelOne, - } - pr.AddPoint("cpu", 1.0, time.Now(), nil) - - var ( - shardMappings *cluster.ShardMapping - err error - ) - if shardMappings, err = c.MapShards(pr); err != nil { - t.Fatalf("unexpected an error: %v", err) - } - - if exp := 1; len(shardMappings.Points) != exp { - t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Points), exp) - } -} - -// Ensures the points writer maps a multiple points across shard group boundaries. -func TestPointsWriter_MapShards_Multiple(t *testing.T) { - ms := MetaStore{} - rp := NewRetentionPolicy("myp", time.Hour, 3) - AttachShardGroupInfo(rp, []meta.ShardOwner{ - {NodeID: 1}, - {NodeID: 2}, - {NodeID: 3}, - }) - AttachShardGroupInfo(rp, []meta.ShardOwner{ - {NodeID: 1}, - {NodeID: 2}, - {NodeID: 3}, - }) - - ms.NodeIDFn = func() uint64 { return 1 } - ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { - return rp, nil - } - - ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { - for i, sg := range rp.ShardGroups { - if timestamp.Equal(sg.StartTime) || timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime) { - return &rp.ShardGroups[i], nil - } - } - panic("should not get here") - } - - c := cluster.PointsWriter{MetaStore: ms} - pr := &cluster.WritePointsRequest{ - Database: "mydb", - RetentionPolicy: "myrp", - ConsistencyLevel: cluster.ConsistencyLevelOne, - } - - // Three points that range over the shardGroup duration (1h) and should map to two - // distinct shards - pr.AddPoint("cpu", 1.0, time.Unix(0, 0), nil) - pr.AddPoint("cpu", 2.0, time.Unix(0, 0).Add(time.Hour), nil) - pr.AddPoint("cpu", 3.0, time.Unix(0, 0).Add(time.Hour+time.Second), nil) - - var ( - shardMappings *cluster.ShardMapping - err error - ) - if shardMappings, err = c.MapShards(pr); err != nil { - t.Fatalf("unexpected an error: %v", err) - } - - if exp := 2; len(shardMappings.Points) != exp { - t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Points), exp) - } - - for _, points := range shardMappings.Points { - // First shard shoud have 1 point w/ first point added - if len(points) == 1 && points[0].Time() != pr.Points[0].Time() { - t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[0].Time(), pr.Points[0].Time()) - } - - // Second shard shoud have the last two points added - if len(points) == 2 && points[0].Time() != pr.Points[1].Time() { - t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[0].Time(), pr.Points[1].Time()) - } - - if len(points) == 2 && points[1].Time() != pr.Points[2].Time() { - t.Fatalf("MapShards() value mismatch. 
got %v, exp %v", points[1].Time(), pr.Points[2].Time()) - } - } -} - -func TestPointsWriter_WritePoints(t *testing.T) { - tests := []struct { - name string - database string - retentionPolicy string - consistency cluster.ConsistencyLevel - - // the responses returned by each shard write call. node ID 1 = pos 0 - err []error - expErr error - }{ - // Consistency one - { - name: "write one success", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelOne, - err: []error{nil, nil, nil}, - expErr: nil, - }, - { - name: "write one error", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelOne, - err: []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")}, - expErr: fmt.Errorf("write failed: a failure"), - }, - - // Consistency any - { - name: "write any success", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelAny, - err: []error{fmt.Errorf("a failure"), nil, fmt.Errorf("a failure")}, - expErr: nil, - }, - // Consistency all - { - name: "write all success", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelAll, - err: []error{nil, nil, nil}, - expErr: nil, - }, - { - name: "write all, 2/3, partial write", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelAll, - err: []error{nil, fmt.Errorf("a failure"), nil}, - expErr: cluster.ErrPartialWrite, - }, - { - name: "write all, 1/3 (failure)", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelAll, - err: []error{nil, fmt.Errorf("a failure"), fmt.Errorf("a failure")}, - expErr: cluster.ErrPartialWrite, - }, - - // Consistency quorum - { - name: "write quorum, 1/3 failure", - consistency: cluster.ConsistencyLevelQuorum, - database: "mydb", - retentionPolicy: "myrp", - err: []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), nil}, - expErr: cluster.ErrPartialWrite, - }, - { - name: "write quorum, 2/3 success", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelQuorum, - err: []error{nil, nil, fmt.Errorf("a failure")}, - expErr: nil, - }, - { - name: "write quorum, 3/3 success", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelQuorum, - err: []error{nil, nil, nil}, - expErr: nil, - }, - - // Error write error - { - name: "no writes succeed", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelOne, - err: []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")}, - expErr: fmt.Errorf("write failed: a failure"), - }, - - // Hinted handoff w/ ANY - { - name: "hinted handoff write succeed", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelAny, - err: []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")}, - expErr: nil, - }, - - // Write to non-existent database - { - name: "write to non-existent database", - database: "doesnt_exist", - retentionPolicy: "", - consistency: cluster.ConsistencyLevelAny, - err: []error{nil, nil, nil}, - expErr: fmt.Errorf("database not found: doesnt_exist"), - }, - } - - for _, test := range tests { - - pr := &cluster.WritePointsRequest{ - Database: test.database, - RetentionPolicy: test.retentionPolicy, - ConsistencyLevel: test.consistency, - } - - // Three points that range over the shardGroup duration (1h) and should map to two - // distinct shards - pr.AddPoint("cpu", 1.0, time.Unix(0, 0), 
nil) - pr.AddPoint("cpu", 2.0, time.Unix(0, 0).Add(time.Hour), nil) - pr.AddPoint("cpu", 3.0, time.Unix(0, 0).Add(time.Hour+time.Second), nil) - - // copy to prevent data race - theTest := test - sm := cluster.NewShardMapping() - sm.MapPoint( - &meta.ShardInfo{ID: uint64(1), Owners: []meta.ShardOwner{ - {NodeID: 1}, - {NodeID: 2}, - {NodeID: 3}, - }}, - pr.Points[0]) - sm.MapPoint( - &meta.ShardInfo{ID: uint64(2), Owners: []meta.ShardOwner{ - {NodeID: 1}, - {NodeID: 2}, - {NodeID: 3}, - }}, - pr.Points[1]) - sm.MapPoint( - &meta.ShardInfo{ID: uint64(2), Owners: []meta.ShardOwner{ - {NodeID: 1}, - {NodeID: 2}, - {NodeID: 3}, - }}, - pr.Points[2]) - - // Local cluster.Node ShardWriter - // lock on the write increment since these functions get called in parallel - var mu sync.Mutex - sw := &fakeShardWriter{ - ShardWriteFn: func(shardID, nodeID uint64, points []models.Point) error { - mu.Lock() - defer mu.Unlock() - return theTest.err[int(nodeID)-1] - }, - } - - store := &fakeStore{ - WriteFn: func(shardID uint64, points []models.Point) error { - mu.Lock() - defer mu.Unlock() - return theTest.err[0] - }, - } - - hh := &fakeShardWriter{ - ShardWriteFn: func(shardID, nodeID uint64, points []models.Point) error { - return nil - }, - } - - ms := NewMetaStore() - ms.DatabaseFn = func(database string) (*meta.DatabaseInfo, error) { - return nil, nil - } - ms.NodeIDFn = func() uint64 { return 1 } - - subPoints := make(chan *cluster.WritePointsRequest, 1) - sub := Subscriber{} - sub.PointsFn = func() chan<- *cluster.WritePointsRequest { - return subPoints - } - - c := cluster.NewPointsWriter() - c.MetaStore = ms - c.ShardWriter = sw - c.TSDBStore = store - c.HintedHandoff = hh - c.Subscriber = sub - - c.Open() - defer c.Close() - - err := c.WritePoints(pr) - if err == nil && test.expErr != nil { - t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr) - } - - if err != nil && test.expErr == nil { - t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr) - } - if err != nil && test.expErr != nil && err.Error() != test.expErr.Error() { - t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr) - } - if test.expErr == nil { - select { - case p := <-subPoints: - if p != pr { - t.Errorf("PointsWriter.WritePoints(): '%s' error: unexpected WritePointsRequest got %v, exp %v", test.name, p, pr) - } - default: - t.Errorf("PointsWriter.WritePoints(): '%s' error: Subscriber.Points not called", test.name) - } - } - } -} - -var shardID uint64 - -type fakeShardWriter struct { - ShardWriteFn func(shardID, nodeID uint64, points []models.Point) error -} - -func (f *fakeShardWriter) WriteShard(shardID, nodeID uint64, points []models.Point) error { - return f.ShardWriteFn(shardID, nodeID, points) -} - -type fakeStore struct { - WriteFn func(shardID uint64, points []models.Point) error - CreateShardfn func(database, retentionPolicy string, shardID uint64) error -} - -func (f *fakeStore) WriteToShard(shardID uint64, points []models.Point) error { - return f.WriteFn(shardID, points) -} - -func (f *fakeStore) CreateShard(database, retentionPolicy string, shardID uint64) error { - return f.CreateShardfn(database, retentionPolicy, shardID) -} - -func NewMetaStore() *MetaStore { - ms := &MetaStore{} - rp := NewRetentionPolicy("myp", time.Hour, 3) - AttachShardGroupInfo(rp, []meta.ShardOwner{ - {NodeID: 1}, - {NodeID: 2}, - {NodeID: 3}, - }) - AttachShardGroupInfo(rp, []meta.ShardOwner{ - {NodeID: 1}, - {NodeID: 
2}, - {NodeID: 3}, - }) - - ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { - return rp, nil - } - - ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { - for i, sg := range rp.ShardGroups { - if timestamp.Equal(sg.StartTime) || timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime) { - return &rp.ShardGroups[i], nil - } - } - panic("should not get here") - } - return ms -} - -type MetaStore struct { - NodeIDFn func() uint64 - RetentionPolicyFn func(database, name string) (*meta.RetentionPolicyInfo, error) - CreateShardGroupIfNotExistsFn func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) - DatabaseFn func(database string) (*meta.DatabaseInfo, error) - ShardOwnerFn func(shardID uint64) (string, string, *meta.ShardGroupInfo) -} - -func (m MetaStore) NodeID() uint64 { return m.NodeIDFn() } - -func (m MetaStore) RetentionPolicy(database, name string) (*meta.RetentionPolicyInfo, error) { - return m.RetentionPolicyFn(database, name) -} - -func (m MetaStore) CreateShardGroupIfNotExists(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { - return m.CreateShardGroupIfNotExistsFn(database, policy, timestamp) -} - -func (m MetaStore) Database(database string) (*meta.DatabaseInfo, error) { - return m.DatabaseFn(database) -} - -func (m MetaStore) ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) { - return m.ShardOwnerFn(shardID) -} - -type Subscriber struct { - PointsFn func() chan<- *cluster.WritePointsRequest -} - -func (s Subscriber) Points() chan<- *cluster.WritePointsRequest { - return s.PointsFn() -} - -func NewRetentionPolicy(name string, duration time.Duration, nodeCount int) *meta.RetentionPolicyInfo { - shards := []meta.ShardInfo{} - owners := []meta.ShardOwner{} - for i := 1; i <= nodeCount; i++ { - owners = append(owners, meta.ShardOwner{NodeID: uint64(i)}) - } - - // each node is fully replicated with each other - shards = append(shards, meta.ShardInfo{ - ID: nextShardID(), - Owners: owners, - }) - - rp := &meta.RetentionPolicyInfo{ - Name: "myrp", - ReplicaN: nodeCount, - Duration: duration, - ShardGroupDuration: duration, - ShardGroups: []meta.ShardGroupInfo{ - meta.ShardGroupInfo{ - ID: nextShardID(), - StartTime: time.Unix(0, 0), - EndTime: time.Unix(0, 0).Add(duration).Add(-1), - Shards: shards, - }, - }, - } - return rp -} - -func AttachShardGroupInfo(rp *meta.RetentionPolicyInfo, owners []meta.ShardOwner) { - var startTime, endTime time.Time - if len(rp.ShardGroups) == 0 { - startTime = time.Unix(0, 0) - } else { - startTime = rp.ShardGroups[len(rp.ShardGroups)-1].StartTime.Add(rp.ShardGroupDuration) - } - endTime = startTime.Add(rp.ShardGroupDuration).Add(-1) - - sh := meta.ShardGroupInfo{ - ID: uint64(len(rp.ShardGroups) + 1), - StartTime: startTime, - EndTime: endTime, - Shards: []meta.ShardInfo{ - meta.ShardInfo{ - ID: nextShardID(), - Owners: owners, - }, - }, - } - rp.ShardGroups = append(rp.ShardGroups, sh) -} - -func nextShardID() uint64 { - return atomic.AddUint64(&shardID, 1) -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc.go deleted file mode 100644 index defbb4fd5..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc.go +++ /dev/null @@ -1,213 +0,0 @@ -package cluster - -import ( - "fmt" - "time" - - "github.com/gogo/protobuf/proto" - 
"github.com/influxdb/influxdb/cluster/internal" - "github.com/influxdb/influxdb/models" -) - -//go:generate protoc --gogo_out=. internal/data.proto - -// MapShardRequest represents the request to map a remote shard for a query. -type MapShardRequest struct { - pb internal.MapShardRequest -} - -// ShardID of the map request -func (m *MapShardRequest) ShardID() uint64 { return m.pb.GetShardID() } - -// Query returns the Shard map request's query -func (m *MapShardRequest) Query() string { return m.pb.GetQuery() } - -// ChunkSize returns Shard map request's chunk size -func (m *MapShardRequest) ChunkSize() int32 { return m.pb.GetChunkSize() } - -// SetShardID sets the map request's shard id -func (m *MapShardRequest) SetShardID(id uint64) { m.pb.ShardID = &id } - -// SetQuery sets the Shard map request's Query -func (m *MapShardRequest) SetQuery(query string) { m.pb.Query = &query } - -// SetChunkSize sets the Shard map request's chunk size -func (m *MapShardRequest) SetChunkSize(chunkSize int32) { m.pb.ChunkSize = &chunkSize } - -// MarshalBinary encodes the object to a binary format. -func (m *MapShardRequest) MarshalBinary() ([]byte, error) { - return proto.Marshal(&m.pb) -} - -// UnmarshalBinary populates MapShardRequest from a binary format. -func (m *MapShardRequest) UnmarshalBinary(buf []byte) error { - if err := proto.Unmarshal(buf, &m.pb); err != nil { - return err - } - return nil -} - -// MapShardResponse represents the response returned from a remote MapShardRequest call -type MapShardResponse struct { - pb internal.MapShardResponse -} - -// NewMapShardResponse returns the response returned from a remote MapShardRequest call -func NewMapShardResponse(code int, message string) *MapShardResponse { - m := &MapShardResponse{} - m.SetCode(code) - m.SetMessage(message) - return m -} - -// Code returns the Shard map response's code -func (r *MapShardResponse) Code() int { return int(r.pb.GetCode()) } - -// Message returns the the Shard map response's Message -func (r *MapShardResponse) Message() string { return r.pb.GetMessage() } - -// TagSets returns Shard map response's tag sets -func (r *MapShardResponse) TagSets() []string { return r.pb.GetTagSets() } - -// Fields returns the Shard map response's Fields -func (r *MapShardResponse) Fields() []string { return r.pb.GetFields() } - -// Data returns the Shard map response's Data -func (r *MapShardResponse) Data() []byte { return r.pb.GetData() } - -// SetCode sets the Shard map response's code -func (r *MapShardResponse) SetCode(code int) { r.pb.Code = proto.Int32(int32(code)) } - -// SetMessage sets Shard map response's message -func (r *MapShardResponse) SetMessage(message string) { r.pb.Message = &message } - -// SetTagSets sets Shard map response's tagsets -func (r *MapShardResponse) SetTagSets(tagsets []string) { r.pb.TagSets = tagsets } - -// SetFields sets the Shard map response's Fields -func (r *MapShardResponse) SetFields(fields []string) { r.pb.Fields = fields } - -// SetData sets the Shard map response's Data -func (r *MapShardResponse) SetData(data []byte) { r.pb.Data = data } - -// MarshalBinary encodes the object to a binary format. -func (r *MapShardResponse) MarshalBinary() ([]byte, error) { - return proto.Marshal(&r.pb) -} - -// UnmarshalBinary populates WritePointRequest from a binary format. 
-func (r *MapShardResponse) UnmarshalBinary(buf []byte) error { - if err := proto.Unmarshal(buf, &r.pb); err != nil { - return err - } - return nil -} - -// WritePointsRequest represents a request to write point data to the cluster -type WritePointsRequest struct { - Database string - RetentionPolicy string - ConsistencyLevel ConsistencyLevel - Points []models.Point -} - -// AddPoint adds a point to the WritePointsRequest with field key 'value' -func (w *WritePointsRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) { - pt, err := models.NewPoint( - name, tags, map[string]interface{}{"value": value}, timestamp, - ) - if err != nil { - return - } - w.Points = append(w.Points, pt) -} - -// WriteShardRequest represents a request to write a slice of points to a shard -type WriteShardRequest struct { - pb internal.WriteShardRequest -} - -// WriteShardResponse represents the response returned from a remote WriteShardRequest call -type WriteShardResponse struct { - pb internal.WriteShardResponse -} - -// SetShardID sets the ShardID -func (w *WriteShardRequest) SetShardID(id uint64) { w.pb.ShardID = &id } - -// ShardID gets the ShardID -func (w *WriteShardRequest) ShardID() uint64 { return w.pb.GetShardID() } - -// Points returns the time series Points -func (w *WriteShardRequest) Points() []models.Point { return w.unmarshalPoints() } - -// AddPoint adds a new time series point -func (w *WriteShardRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) { - pt, err := models.NewPoint( - name, tags, map[string]interface{}{"value": value}, timestamp, - ) - if err != nil { - return - } - w.AddPoints([]models.Point{pt}) -} - -// AddPoints adds new time series points -func (w *WriteShardRequest) AddPoints(points []models.Point) { - for _, p := range points { - w.pb.Points = append(w.pb.Points, []byte(p.String())) - } -} - -// MarshalBinary encodes the object to a binary format. -func (w *WriteShardRequest) MarshalBinary() ([]byte, error) { - return proto.Marshal(&w.pb) -} - -// UnmarshalBinary populates WriteShardRequest from a binary format. -func (w *WriteShardRequest) UnmarshalBinary(buf []byte) error { - if err := proto.Unmarshal(buf, &w.pb); err != nil { - return err - } - return nil -} - -func (w *WriteShardRequest) unmarshalPoints() []models.Point { - points := make([]models.Point, len(w.pb.GetPoints())) - for i, p := range w.pb.GetPoints() { - pt, err := models.ParsePoints(p) - if err != nil { - // An error here means that one node parsed the point correctly but sent an - // unparseable version to another node. We could log and drop the point and allow - // anti-entropy to resolve the discrepancy but this shouldn't ever happen. - panic(fmt.Sprintf("failed to parse point: `%v`: %v", string(p), err)) - } - points[i] = pt[0] - } - return points -} - -// SetCode sets the Code -func (w *WriteShardResponse) SetCode(code int) { w.pb.Code = proto.Int32(int32(code)) } - -// SetMessage sets the Message -func (w *WriteShardResponse) SetMessage(message string) { w.pb.Message = &message } - -// Code returns the Code -func (w *WriteShardResponse) Code() int { return int(w.pb.GetCode()) } - -// Message returns the Message -func (w *WriteShardResponse) Message() string { return w.pb.GetMessage() } - -// MarshalBinary encodes the object to a binary format. -func (w *WriteShardResponse) MarshalBinary() ([]byte, error) { - return proto.Marshal(&w.pb) -} - -// UnmarshalBinary populates WriteShardResponse from a binary format. 
-func (w *WriteShardResponse) UnmarshalBinary(buf []byte) error { - if err := proto.Unmarshal(buf, &w.pb); err != nil { - return err - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc_test.go deleted file mode 100644 index 4e42cd5d6..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package cluster - -import ( - "testing" - "time" -) - -func TestWriteShardRequestBinary(t *testing.T) { - sr := &WriteShardRequest{} - - sr.SetShardID(uint64(1)) - if exp := uint64(1); sr.ShardID() != exp { - t.Fatalf("ShardID mismatch: got %v, exp %v", sr.ShardID(), exp) - } - - sr.AddPoint("cpu", 1.0, time.Unix(0, 0), map[string]string{"host": "serverA"}) - sr.AddPoint("cpu", 2.0, time.Unix(0, 0).Add(time.Hour), nil) - sr.AddPoint("cpu_load", 3.0, time.Unix(0, 0).Add(time.Hour+time.Second), nil) - - b, err := sr.MarshalBinary() - if err != nil { - t.Fatalf("WriteShardRequest.MarshalBinary() failed: %v", err) - } - if len(b) == 0 { - t.Fatalf("WriteShardRequest.MarshalBinary() returned 0 bytes") - } - - got := &WriteShardRequest{} - if err := got.UnmarshalBinary(b); err != nil { - t.Fatalf("WriteShardRequest.UnmarshalBinary() failed: %v", err) - } - - if got.ShardID() != sr.ShardID() { - t.Errorf("ShardID mismatch: got %v, exp %v", got.ShardID(), sr.ShardID()) - } - - if len(got.Points()) != len(sr.Points()) { - t.Errorf("Points count mismatch: got %v, exp %v", len(got.Points()), len(sr.Points())) - } - - srPoints := sr.Points() - gotPoints := got.Points() - for i, p := range srPoints { - g := gotPoints[i] - - if g.Name() != p.Name() { - t.Errorf("Point %d name mismatch: got %v, exp %v", i, g.Name(), p.Name()) - } - - if !g.Time().Equal(p.Time()) { - t.Errorf("Point %d time mismatch: got %v, exp %v", i, g.Time(), p.Time()) - } - - if g.HashID() != p.HashID() { - t.Errorf("Point #%d HashID() mismatch: got %v, exp %v", i, g.HashID(), p.HashID()) - } - - for k, v := range p.Tags() { - if g.Tags()[k] != v { - t.Errorf("Point #%d tag mismatch: got %v, exp %v", i, k, v) - } - } - - if len(p.Fields()) != len(g.Fields()) { - t.Errorf("Point %d field count mismatch: got %v, exp %v", i, len(g.Fields()), len(p.Fields())) - } - - for j, f := range p.Fields() { - if g.Fields()[j] != f { - t.Errorf("Point %d field mismatch: got %v, exp %v", i, g.Fields()[j], f) - } - } - } -} - -func TestWriteShardResponseBinary(t *testing.T) { - sr := &WriteShardResponse{} - sr.SetCode(10) - sr.SetMessage("foo") - b, err := sr.MarshalBinary() - - if exp := 10; sr.Code() != exp { - t.Fatalf("Code mismatch: got %v, exp %v", sr.Code(), exp) - } - - if exp := "foo"; sr.Message() != exp { - t.Fatalf("Message mismatch: got %v, exp %v", sr.Message(), exp) - } - - if err != nil { - t.Fatalf("WriteShardResponse.MarshalBinary() failed: %v", err) - } - if len(b) == 0 { - t.Fatalf("WriteShardResponse.MarshalBinary() returned 0 bytes") - } - - got := &WriteShardResponse{} - if err := got.UnmarshalBinary(b); err != nil { - t.Fatalf("WriteShardResponse.UnmarshalBinary() failed: %v", err) - } - - if got.Code() != sr.Code() { - t.Errorf("Code mismatch: got %v, exp %v", got.Code(), sr.Code()) - } - - if got.Message() != sr.Message() { - t.Errorf("Message mismatch: got %v, exp %v", got.Message(), sr.Message()) - } - -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service.go 
b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service.go deleted file mode 100644 index 5169a6e33..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service.go +++ /dev/null @@ -1,371 +0,0 @@ -package cluster - -import ( - "encoding/binary" - "encoding/json" - "expvar" - "fmt" - "io" - "log" - "net" - "os" - "strings" - "sync" - - "github.com/influxdb/influxdb" - "github.com/influxdb/influxdb/influxql" - "github.com/influxdb/influxdb/meta" - "github.com/influxdb/influxdb/models" - "github.com/influxdb/influxdb/tsdb" -) - -// MaxMessageSize defines how large a message can be before we reject it -const MaxMessageSize = 1024 * 1024 * 1024 // 1GB - -// MuxHeader is the header byte used in the TCP mux. -const MuxHeader = 2 - -// Statistics maintained by the cluster package -const ( - writeShardReq = "writeShardReq" - writeShardPointsReq = "writeShardPointsReq" - writeShardFail = "writeShardFail" - mapShardReq = "mapShardReq" - mapShardResp = "mapShardResp" -) - -// Service processes data received over raw TCP connections. -type Service struct { - mu sync.RWMutex - - wg sync.WaitGroup - closing chan struct{} - - Listener net.Listener - - MetaStore interface { - ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) - } - - TSDBStore interface { - CreateShard(database, policy string, shardID uint64) error - WriteToShard(shardID uint64, points []models.Point) error - CreateMapper(shardID uint64, stmt influxql.Statement, chunkSize int) (tsdb.Mapper, error) - } - - Logger *log.Logger - statMap *expvar.Map -} - -// NewService returns a new instance of Service. -func NewService(c Config) *Service { - return &Service{ - closing: make(chan struct{}), - Logger: log.New(os.Stderr, "[cluster] ", log.LstdFlags), - statMap: influxdb.NewStatistics("cluster", "cluster", nil), - } -} - -// Open opens the network listener and begins serving requests. -func (s *Service) Open() error { - - s.Logger.Println("Starting cluster service") - // Begin serving connections. - s.wg.Add(1) - go s.serve() - - return nil -} - -// SetLogger sets the internal logger to the logger passed in. -func (s *Service) SetLogger(l *log.Logger) { - s.Logger = l -} - -// serve accepts connections from the listener and handles them. -func (s *Service) serve() { - defer s.wg.Done() - - for { - // Check if the service is shutting down. - select { - case <-s.closing: - return - default: - } - - // Accept the next connection. - conn, err := s.Listener.Accept() - if err != nil { - if strings.Contains(err.Error(), "connection closed") { - s.Logger.Printf("cluster service accept error: %s", err) - return - } - s.Logger.Printf("accept error: %s", err) - continue - } - - // Delegate connection handling to a separate goroutine. - s.wg.Add(1) - go func() { - defer s.wg.Done() - s.handleConn(conn) - }() - } -} - -// Close shuts down the listener and waits for all connections to finish. -func (s *Service) Close() error { - if s.Listener != nil { - s.Listener.Close() - } - - // Shut down all handlers. - close(s.closing) - s.wg.Wait() - - return nil -} - -// handleConn services an individual TCP connection. -func (s *Service) handleConn(conn net.Conn) { - // Ensure connection is closed when service is closed. 
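 - // A watcher goroutine below selects on this connection's closing channel and - // the service-wide closing channel; whichever closes first closes the conn.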
- closing := make(chan struct{}) - defer close(closing) - go func() { - select { - case <-closing: - case <-s.closing: - } - conn.Close() - }() - - s.Logger.Printf("accept remote connection from %v\n", conn.RemoteAddr()) - defer func() { - s.Logger.Printf("close remote connection from %v\n", conn.RemoteAddr()) - }() - for { - // Read type-length-value. - typ, buf, err := ReadTLV(conn) - if err != nil { - if strings.HasSuffix(err.Error(), "EOF") { - return - } - s.Logger.Printf("unable to read type-length-value %s", err) - return - } - - // Delegate message processing by type. - switch typ { - case writeShardRequestMessage: - s.statMap.Add(writeShardReq, 1) - err := s.processWriteShardRequest(buf) - if err != nil { - s.Logger.Printf("process write shard error: %s", err) - } - s.writeShardResponse(conn, err) - case mapShardRequestMessage: - s.statMap.Add(mapShardReq, 1) - err := s.processMapShardRequest(conn, buf) - if err != nil { - s.Logger.Printf("process map shard error: %s", err) - if err := writeMapShardResponseMessage(conn, NewMapShardResponse(1, err.Error())); err != nil { - s.Logger.Printf("process map shard error writing response: %s", err.Error()) - } - } - default: - s.Logger.Printf("cluster service message type not found: %d", typ) - } - } -} - -func (s *Service) processWriteShardRequest(buf []byte) error { - // Build request - var req WriteShardRequest - if err := req.UnmarshalBinary(buf); err != nil { - return err - } - - points := req.Points() - s.statMap.Add(writeShardPointsReq, int64(len(points))) - err := s.TSDBStore.WriteToShard(req.ShardID(), req.Points()) - - // We may have received a write for a shard that we don't have locally because the - // sending node may have just created the shard (via the metastore) and the write - // arrived before the local store could create the shard. In this case, we need - // to check the metastore to determine what database and retention policy this - // shard should reside within. - if err == tsdb.ErrShardNotFound { - - // Query the metastore for the owner of this shard - database, retentionPolicy, sgi := s.MetaStore.ShardOwner(req.ShardID()) - if sgi == nil { - // If we can't find it, then we need to drop this request - // as it is no longer valid. This could happen if writes were queued via - // hinted handoff and delivered after a shard group was deleted. - s.Logger.Printf("drop write request: shard=%d. shard group does not exist or was deleted", req.ShardID()) - return nil - } - - err = s.TSDBStore.CreateShard(database, retentionPolicy, req.ShardID()) - if err != nil { - return err - } - return s.TSDBStore.WriteToShard(req.ShardID(), req.Points()) - } - - if err != nil { - s.statMap.Add(writeShardFail, 1) - return fmt.Errorf("write shard %d: %s", req.ShardID(), err) - } - - return nil -} - -func (s *Service) writeShardResponse(w io.Writer, e error) { - // Build response. - var resp WriteShardResponse - if e != nil { - resp.SetCode(1) - resp.SetMessage(e.Error()) - } else { - resp.SetCode(0) - } - - // Marshal response to binary. - buf, err := resp.MarshalBinary() - if err != nil { - s.Logger.Printf("error marshalling shard response: %s", err) - return - } - - // Write to connection. - if err := WriteTLV(w, writeShardResponseMessage, buf); err != nil { - s.Logger.Printf("write shard response error: %s", err) - } -} - -func (s *Service) processMapShardRequest(w io.Writer, buf []byte) error { - // Decode request - var req MapShardRequest - if err := req.UnmarshalBinary(buf); err != nil { - return err - } - - // Parse the statement. 
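processWriteShardRequest above retries exactly once: if the first WriteToShard fails with tsdb.ErrShardNotFound, it asks the metastore who owns the shard, creates the shard locally, and re-issues the write. A stripped-down sketch of that create-then-retry shape (store, write, and create are illustrative stand-ins, not the real interfaces):

package main

import (
	"errors"
	"fmt"
)

var errShardNotFound = errors.New("shard not found")

// store stands in for the TSDBStore interface used above
// (illustrative only; not the deleted file's types).
type store struct{ shards map[uint64]bool }

func (s *store) write(id uint64) error {
	if !s.shards[id] {
		return errShardNotFound
	}
	return nil
}

func (s *store) create(id uint64) { s.shards[id] = true }

// writeWithRetry mirrors the shape of processWriteShardRequest:
// one write, one create, one retry — never a loop.
func writeWithRetry(s *store, id uint64) error {
	err := s.write(id)
	if errors.Is(err, errShardNotFound) {
		s.create(id) // the real service consults the metastore first
		return s.write(id)
	}
	return err
}

func main() {
	s := &store{shards: map[uint64]bool{}}
	fmt.Println(writeWithRetry(s, 1)) // <nil>: created on demand, then written
}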
- q, err := influxql.ParseQuery(req.Query()) - if err != nil { - return fmt.Errorf("processing map shard: %s", err) - } else if len(q.Statements) != 1 { - return fmt.Errorf("processing map shard: expected 1 statement but got %d", len(q.Statements)) - } - - m, err := s.TSDBStore.CreateMapper(req.ShardID(), q.Statements[0], int(req.ChunkSize())) - if err != nil { - return fmt.Errorf("create mapper: %s", err) - } - if m == nil { - return writeMapShardResponseMessage(w, NewMapShardResponse(0, "")) - } - - if err := m.Open(); err != nil { - return fmt.Errorf("mapper open: %s", err) - } - defer m.Close() - - var metaSent bool - for { - var resp MapShardResponse - - if !metaSent { - resp.SetTagSets(m.TagSets()) - resp.SetFields(m.Fields()) - metaSent = true - } - - chunk, err := m.NextChunk() - if err != nil { - return fmt.Errorf("next chunk: %s", err) - } - - // NOTE: Even if the chunk is nil, we still need to send one - // empty response to let the other side know we're out of data. - - if chunk != nil { - b, err := json.Marshal(chunk) - if err != nil { - return fmt.Errorf("encoding: %s", err) - } - resp.SetData(b) - } - - // Write to connection. - resp.SetCode(0) - if err := writeMapShardResponseMessage(w, &resp); err != nil { - return err - } - s.statMap.Add(mapShardResp, 1) - - if chunk == nil { - // All mapper data sent. - return nil - } - } -} - -func writeMapShardResponseMessage(w io.Writer, msg *MapShardResponse) error { - buf, err := msg.MarshalBinary() - if err != nil { - return err - } - return WriteTLV(w, mapShardResponseMessage, buf) -} - -// ReadTLV reads a type-length-value record from r. -func ReadTLV(r io.Reader) (byte, []byte, error) { - var typ [1]byte - if _, err := io.ReadFull(r, typ[:]); err != nil { - return 0, nil, fmt.Errorf("read message type: %s", err) - } - - // Read the size of the message. - var sz int64 - if err := binary.Read(r, binary.BigEndian, &sz); err != nil { - return 0, nil, fmt.Errorf("read message size: %s", err) - } - - if sz == 0 { - return 0, nil, fmt.Errorf("invalid message size: %d", sz) - } - - if sz >= MaxMessageSize { - return 0, nil, fmt.Errorf("max message size of %d exceeded: %d", MaxMessageSize, sz) - } - - // Read the value. - buf := make([]byte, sz) - if _, err := io.ReadFull(r, buf); err != nil { - return 0, nil, fmt.Errorf("read message value: %s", err) - } - - return typ[0], buf, nil -} - -// WriteTLV writes a type-length-value record to w. -func WriteTLV(w io.Writer, typ byte, buf []byte) error { - if _, err := w.Write([]byte{typ}); err != nil { - return fmt.Errorf("write message type: %s", err) - } - - // Write the size of the message. - if err := binary.Write(w, binary.BigEndian, int64(len(buf))); err != nil { - return fmt.Errorf("write message size: %s", err) - } - - // Write the value. 
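ReadTLV above (and WriteTLV, completed just below) frame every cluster message as one type byte, a big-endian int64 length, and the value, rejecting zero-length and over-1GB frames on the read side. A self-contained round trip that restates the framing with local helper names (writeTLV/readTLV are illustrative re-statements, omitting the size checks):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"log"
)

// writeTLV restates the framing of WriteTLV above: one type byte,
// a big-endian int64 length, then the value bytes.
func writeTLV(w io.Writer, typ byte, value []byte) error {
	if _, err := w.Write([]byte{typ}); err != nil {
		return err
	}
	if err := binary.Write(w, binary.BigEndian, int64(len(value))); err != nil {
		return err
	}
	_, err := w.Write(value)
	return err
}

// readTLV restates ReadTLV above, minus the zero/max size checks.
func readTLV(r io.Reader) (byte, []byte, error) {
	var typ [1]byte
	if _, err := io.ReadFull(r, typ[:]); err != nil {
		return 0, nil, err
	}
	var sz int64
	if err := binary.Read(r, binary.BigEndian, &sz); err != nil {
		return 0, nil, err
	}
	value := make([]byte, sz)
	if _, err := io.ReadFull(r, value); err != nil {
		return 0, nil, err
	}
	return typ[0], value, nil
}

func main() {
	var buf bytes.Buffer
	if err := writeTLV(&buf, 7, []byte("hello")); err != nil {
		log.Fatal(err)
	}
	fmt.Println(buf.Len()) // 14: 1 type byte + 8 length bytes + 5 payload bytes
	typ, v, err := readTLV(&buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(typ, string(v)) // 7 hello
}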
- if _, err := w.Write(buf); err != nil { - return fmt.Errorf("write message value: %s", err) - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service_test.go deleted file mode 100644 index 8158c47a6..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package cluster_test - -import ( - "fmt" - "net" - "time" - - "github.com/influxdb/influxdb/cluster" - "github.com/influxdb/influxdb/influxql" - "github.com/influxdb/influxdb/meta" - "github.com/influxdb/influxdb/models" - "github.com/influxdb/influxdb/tcp" - "github.com/influxdb/influxdb/tsdb" -) - -type metaStore struct { - host string -} - -func (m *metaStore) Node(nodeID uint64) (*meta.NodeInfo, error) { - return &meta.NodeInfo{ - ID: nodeID, - Host: m.host, - }, nil -} - -type testService struct { - nodeID uint64 - ln net.Listener - muxln net.Listener - writeShardFunc func(shardID uint64, points []models.Point) error - createShardFunc func(database, policy string, shardID uint64) error - createMapperFunc func(shardID uint64, stmt influxql.Statement, chunkSize int) (tsdb.Mapper, error) -} - -func newTestWriteService(f func(shardID uint64, points []models.Point) error) testService { - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - panic(err) - } - - mux := tcp.NewMux() - muxln := mux.Listen(cluster.MuxHeader) - go mux.Serve(ln) - - return testService{ - writeShardFunc: f, - ln: ln, - muxln: muxln, - } -} - -func (ts *testService) Close() { - if ts.ln != nil { - ts.ln.Close() - } -} - -type serviceResponses []serviceResponse -type serviceResponse struct { - shardID uint64 - ownerID uint64 - points []models.Point -} - -func (t testService) WriteToShard(shardID uint64, points []models.Point) error { - return t.writeShardFunc(shardID, points) -} - -func (t testService) CreateShard(database, policy string, shardID uint64) error { - return t.createShardFunc(database, policy, shardID) -} - -func (t testService) CreateMapper(shardID uint64, stmt influxql.Statement, chunkSize int) (tsdb.Mapper, error) { - return t.createMapperFunc(shardID, stmt, chunkSize) -} - -func writeShardSuccess(shardID uint64, points []models.Point) error { - responses <- &serviceResponse{ - shardID: shardID, - points: points, - } - return nil -} - -func writeShardFail(shardID uint64, points []models.Point) error { - return fmt.Errorf("failed to write") -} - -var responses = make(chan *serviceResponse, 1024) - -func (testService) ResponseN(n int) ([]*serviceResponse, error) { - var a []*serviceResponse - for { - select { - case r := <-responses: - a = append(a, r) - if len(a) == n { - return a, nil - } - case <-time.After(time.Second): - return a, fmt.Errorf("unexpected response count: expected: %d, actual: %d", n, len(a)) - } - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper.go deleted file mode 100644 index b8645f4a7..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper.go +++ /dev/null @@ -1,259 +0,0 @@ -package cluster - -import ( - "encoding/json" - "fmt" - "math/rand" - "net" - "time" - - "github.com/influxdb/influxdb/influxql" - "github.com/influxdb/influxdb/meta" - "github.com/influxdb/influxdb/tsdb" -) - -// ShardMapper is responsible for providing mappers for requested shards. 
It is -// responsible for creating those mappers from the local store, or reaching -// out to another node on the cluster. -type ShardMapper struct { - ForceRemoteMapping bool // All shards treated as remote. Useful for testing. - - MetaStore interface { - NodeID() uint64 - Node(id uint64) (ni *meta.NodeInfo, err error) - } - - TSDBStore interface { - CreateMapper(shardID uint64, stmt influxql.Statement, chunkSize int) (tsdb.Mapper, error) - } - - timeout time.Duration - pool *clientPool -} - -// NewShardMapper returns a mapper of local and remote shards. -func NewShardMapper(timeout time.Duration) *ShardMapper { - return &ShardMapper{ - pool: newClientPool(), - timeout: timeout, - } -} - -// CreateMapper returns a Mapper for the given shard ID. -func (s *ShardMapper) CreateMapper(sh meta.ShardInfo, stmt influxql.Statement, chunkSize int) (tsdb.Mapper, error) { - // Create a remote mapper if the local node doesn't own the shard. - if !sh.OwnedBy(s.MetaStore.NodeID()) || s.ForceRemoteMapping { - // Pick a node in a pseudo-random manner. - conn, err := s.dial(sh.Owners[rand.Intn(len(sh.Owners))].NodeID) - if err != nil { - return nil, err - } - conn.SetDeadline(time.Now().Add(s.timeout)) - - return NewRemoteMapper(conn, sh.ID, stmt, chunkSize), nil - } - - // If it is local then return the mapper from the store. - m, err := s.TSDBStore.CreateMapper(sh.ID, stmt, chunkSize) - if err != nil { - return nil, err - } - - return m, nil -} - -func (s *ShardMapper) dial(nodeID uint64) (net.Conn, error) { - ni, err := s.MetaStore.Node(nodeID) - if err != nil { - return nil, err - } - conn, err := net.Dial("tcp", ni.Host) - if err != nil { - return nil, err - } - - // Write the cluster multiplexing header byte - conn.Write([]byte{MuxHeader}) - - return conn, nil -} - -// RemoteMapper implements the tsdb.Mapper interface. It connects to a remote node, -// sends a query, and interprets the stream of data that comes back. -type RemoteMapper struct { - shardID uint64 - stmt influxql.Statement - chunkSize int - - tagsets []string - fields []string - - conn net.Conn - bufferedResponse *MapShardResponse - - unmarshallers []tsdb.UnmarshalFunc // Mapping-specific unmarshal functions. -} - -// NewRemoteMapper returns a new remote mapper using the given connection. -func NewRemoteMapper(c net.Conn, shardID uint64, stmt influxql.Statement, chunkSize int) *RemoteMapper { - return &RemoteMapper{ - conn: c, - shardID: shardID, - stmt: stmt, - chunkSize: chunkSize, - } -} - -// Open connects to the remote node and starts receiving data. -func (r *RemoteMapper) Open() (err error) { - defer func() { - if err != nil { - r.conn.Close() - } - }() - - // Build Map request. - var request MapShardRequest - request.SetShardID(r.shardID) - request.SetQuery(r.stmt.String()) - request.SetChunkSize(int32(r.chunkSize)) - - // Marshal into protocol buffers. - buf, err := request.MarshalBinary() - if err != nil { - return err - } - - // Write request. - if err := WriteTLV(r.conn, mapShardRequestMessage, buf); err != nil { - return err - } - - // Read the response. - _, buf, err = ReadTLV(r.conn) - if err != nil { - return err - } - - // Unmarshal response. - r.bufferedResponse = &MapShardResponse{} - if err := r.bufferedResponse.UnmarshalBinary(buf); err != nil { - return err - } - - if r.bufferedResponse.Code() != 0 { - return fmt.Errorf("error code %d: %s", r.bufferedResponse.Code(), r.bufferedResponse.Message()) - } - - // Decode the first response to get the TagSets. 
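Open above deliberately consumes the first response so TagSets and Fields are known before any chunk is requested; that response is parked in bufferedResponse and replayed by the first NextChunk call below, so no data is lost. The pattern in isolation (prefetcher and its methods are illustrative names, not the deleted file's API):

package main

import "fmt"

// prefetcher illustrates the bufferedResponse trick: open() must read
// the first response to learn stream metadata, so it stashes that
// response and next() hands it back before reading anything further.
type prefetcher struct {
	src      []string // stands in for the network stream
	buffered *string
	meta     string
}

func (p *prefetcher) open() {
	// Consume the first response eagerly for its metadata.
	first := p.src[0]
	p.src = p.src[1:]
	p.meta = "meta(" + first + ")"
	p.buffered = &first // replayed by the first next() call
}

func (p *prefetcher) next() (string, bool) {
	if p.buffered != nil {
		v := *p.buffered
		p.buffered = nil
		return v, true
	}
	if len(p.src) == 0 {
		return "", false
	}
	v := p.src[0]
	p.src = p.src[1:]
	return v, true
}

func main() {
	p := &prefetcher{src: []string{"chunk1", "chunk2"}}
	p.open()
	fmt.Println(p.meta) // metadata is available immediately after open
	for v, ok := p.next(); ok; v, ok = p.next() {
		fmt.Println(v) // chunk1, chunk2 — the prefetched chunk is not lost
	}
}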
- r.tagsets = r.bufferedResponse.TagSets() - r.fields = r.bufferedResponse.Fields() - - // Set up each mapping function for this statement. - if stmt, ok := r.stmt.(*influxql.SelectStatement); ok { - for _, c := range stmt.FunctionCalls() { - fn, err := tsdb.InitializeUnmarshaller(c) - if err != nil { - return err - } - r.unmarshallers = append(r.unmarshallers, fn) - } - } - - return nil -} - -// TagSets returns the TagSets -func (r *RemoteMapper) TagSets() []string { - return r.tagsets -} - -// Fields returns RemoteMapper's Fields -func (r *RemoteMapper) Fields() []string { - return r.fields -} - -// NextChunk returns the next chunk read from the remote node to the client. -func (r *RemoteMapper) NextChunk() (chunk interface{}, err error) { - var response *MapShardResponse - if r.bufferedResponse != nil { - response = r.bufferedResponse - r.bufferedResponse = nil - } else { - response = &MapShardResponse{} - - // Read the response. - _, buf, err := ReadTLV(r.conn) - if err != nil { - return nil, err - } - - // Unmarshal response. - if err := response.UnmarshalBinary(buf); err != nil { - return nil, err - } - - if response.Code() != 0 { - return nil, fmt.Errorf("error code %d: %s", response.Code(), response.Message()) - } - } - - if response.Data() == nil { - return nil, nil - } - - moj := &tsdb.MapperOutputJSON{} - if err := json.Unmarshal(response.Data(), moj); err != nil { - return nil, err - } - mvj := []*tsdb.MapperValueJSON{} - if err := json.Unmarshal(moj.Values, &mvj); err != nil { - return nil, err - } - - // Prep the non-JSON version of Mapper output. - mo := &tsdb.MapperOutput{ - Name: moj.Name, - Tags: moj.Tags, - Fields: moj.Fields, - } - - if len(mvj) == 1 && len(mvj[0].AggData) > 0 { - // The MapperValue is carrying aggregate data, so run it through the - // custom unmarshallers for the map functions through which the data - // was mapped. - aggValues := []interface{}{} - for i, b := range mvj[0].AggData { - v, err := r.unmarshallers[i](b) - if err != nil { - return nil, err - } - aggValues = append(aggValues, v) - } - mo.Values = []*tsdb.MapperValue{&tsdb.MapperValue{ - Value: aggValues, - Tags: mvj[0].Tags, - }} - } else { - // Must be raw data instead. - for _, v := range mvj { - var rawValue interface{} - if err := json.Unmarshal(v.RawData, &rawValue); err != nil { - return nil, err - } - - mo.Values = append(mo.Values, &tsdb.MapperValue{ - Time: v.Time, - Value: rawValue, - Tags: v.Tags, - }) - } - } - - return mo, nil -} - -// Close the Mapper -func (r *RemoteMapper) Close() { - r.conn.Close() -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper_test.go deleted file mode 100644 index 3a80f596e..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package cluster - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net" - "testing" - - "github.com/influxdb/influxdb/influxql" - "github.com/influxdb/influxdb/tsdb" -) - -// remoteShardResponder implements the remoteShardConn interface. -type remoteShardResponder struct { - net.Conn - t *testing.T - rxBytes []byte - - buffer *bytes.Buffer -} - -func newRemoteShardResponder(outputs []*tsdb.MapperOutput, tagsets []string) *remoteShardResponder { - r := &remoteShardResponder{} - a := make([]byte, 0, 1024) - r.buffer = bytes.NewBuffer(a) - - // Pump the outputs in the buffer for later reading. 
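remoteShardResponder satisfies net.Conn by embedding the interface and overriding only the methods the mapper exercises (its Read, Write, and Close follow just below); any other method would panic on the nil embedded value, which is acceptable in a test double. The same trick reduced to essentials (fakeConn is an illustrative name):

package main

import (
	"bytes"
	"fmt"
	"io"
	"net"
)

// fakeConn embeds net.Conn to satisfy the interface, then overrides
// only Read, Write, and Close. Calling any non-overridden method
// panics (the embedded Conn is nil), which is fine in tests.
type fakeConn struct {
	net.Conn
	buf *bytes.Buffer
}

func (c fakeConn) Read(p []byte) (int, error)  { return io.ReadFull(c.buf, p) }
func (c fakeConn) Write(p []byte) (int, error) { return len(p), nil }
func (c fakeConn) Close() error                { return nil }

func main() {
	c := fakeConn{buf: bytes.NewBufferString("canned response")}
	var conn net.Conn = c // compiles: the interface is satisfied
	p := make([]byte, 6)
	conn.Read(p)
	fmt.Println(string(p)) // "canned"
	conn.Close()
}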
- for _, o := range outputs { - resp := &MapShardResponse{} - resp.SetCode(0) - if o != nil { - d, _ := json.Marshal(o) - resp.SetData(d) - resp.SetTagSets(tagsets) - } - - g, _ := resp.MarshalBinary() - WriteTLV(r.buffer, mapShardResponseMessage, g) - } - - return r -} - -func (r remoteShardResponder) Close() error { return nil } -func (r remoteShardResponder) Read(p []byte) (n int, err error) { - return io.ReadFull(r.buffer, p) -} - -func (r remoteShardResponder) Write(p []byte) (n int, err error) { - if r.rxBytes == nil { - r.rxBytes = make([]byte, 0) - } - r.rxBytes = append(r.rxBytes, p...) - return len(p), nil -} - -// Ensure a RemoteMapper can process valid responses from a remote shard. -func TestShardWriter_RemoteMapper_Success(t *testing.T) { - expTagSets := []string{"tagsetA"} - expOutput := &tsdb.MapperOutput{ - Name: "cpu", - Tags: map[string]string{"host": "serverA"}, - } - - c := newRemoteShardResponder([]*tsdb.MapperOutput{expOutput, nil}, expTagSets) - - r := NewRemoteMapper(c, 1234, mustParseStmt("SELECT * FROM CPU"), 10) - if err := r.Open(); err != nil { - t.Fatalf("failed to open remote mapper: %s", err.Error()) - } - - if r.TagSets()[0] != expTagSets[0] { - t.Fatalf("incorrect tagsets received, exp %v, got %v", expTagSets, r.TagSets()) - } - - // Get first chunk from mapper. - chunk, err := r.NextChunk() - if err != nil { - t.Fatalf("failed to get next chunk from mapper: %s", err.Error()) - } - output, ok := chunk.(*tsdb.MapperOutput) - if !ok { - t.Fatal("chunk is not of expected type") - } - if output.Name != "cpu" { - t.Fatalf("received output incorrect, exp: %v, got %v", expOutput, output) - } - - // Next chunk should be nil, indicating no more data. - chunk, err = r.NextChunk() - if err != nil { - t.Fatalf("failed to get next chunk from mapper: %s", err.Error()) - } - if chunk != nil { - t.Fatal("received more chunks when none expected") - } -} - -// mustParseStmt parses a single statement or panics. -func mustParseStmt(stmt string) influxql.Statement { - q, err := influxql.ParseQuery(stmt) - if err != nil { - panic(err) - } else if len(q.Statements) != 1 { - panic(fmt.Sprintf("expected 1 statement but got %d", len(q.Statements))) - } - return q.Statements[0] -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer.go deleted file mode 100644 index f6da1023c..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer.go +++ /dev/null @@ -1,165 +0,0 @@ -package cluster - -import ( - "fmt" - "net" - "time" - - "github.com/influxdb/influxdb/meta" - "github.com/influxdb/influxdb/models" - "gopkg.in/fatih/pool.v2" -) - -const ( - writeShardRequestMessage byte = iota + 1 - writeShardResponseMessage - mapShardRequestMessage - mapShardResponseMessage -) - -// ShardWriter writes a set of points to a shard. -type ShardWriter struct { - pool *clientPool - timeout time.Duration - - MetaStore interface { - Node(id uint64) (ni *meta.NodeInfo, err error) - } -} - -// NewShardWriter returns a new instance of ShardWriter. 
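Note that the message-type constants above start at iota + 1, so the zero byte is never a legal type: a zeroed or uninitialized type byte cannot masquerade as a real message. A tiny demonstration of the same choice (constant names here are illustrative):

package main

import "fmt"

// Starting the enum at iota + 1 keeps the zero value out of the
// protocol, mirroring writeShardRequestMessage = iota + 1 above.
const (
	writeReq  byte = iota + 1 // 1
	writeResp                 // 2
	mapReq                    // 3
	mapResp                   // 4
)

func valid(t byte) bool { return t >= writeReq && t <= mapResp }

func main() {
	fmt.Println(valid(0))        // false: the zero value is never legal
	fmt.Println(valid(writeReq)) // true
}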
-func NewShardWriter(timeout time.Duration) *ShardWriter { - return &ShardWriter{ - pool: newClientPool(), - timeout: timeout, - } -} - -// WriteShard writes time series points to a shard -func (w *ShardWriter) WriteShard(shardID, ownerID uint64, points []models.Point) error { - c, err := w.dial(ownerID) - if err != nil { - return err - } - - conn, ok := c.(*pool.PoolConn) - if !ok { - panic("wrong connection type") - } - defer func(conn net.Conn) { - conn.Close() // return to pool - }(conn) - - // Build write request. - var request WriteShardRequest - request.SetShardID(shardID) - request.AddPoints(points) - - // Marshal into protocol buffers. - buf, err := request.MarshalBinary() - if err != nil { - return err - } - - // Write request. - conn.SetWriteDeadline(time.Now().Add(w.timeout)) - if err := WriteTLV(conn, writeShardRequestMessage, buf); err != nil { - conn.MarkUnusable() - return err - } - - // Read the response. - conn.SetReadDeadline(time.Now().Add(w.timeout)) - _, buf, err = ReadTLV(conn) - if err != nil { - conn.MarkUnusable() - return err - } - - // Unmarshal response. - var response WriteShardResponse - if err := response.UnmarshalBinary(buf); err != nil { - return err - } - - if response.Code() != 0 { - return fmt.Errorf("error code %d: %s", response.Code(), response.Message()) - } - - return nil -} - -func (w *ShardWriter) dial(nodeID uint64) (net.Conn, error) { - // If we don't have a connection pool for that addr yet, create one - _, ok := w.pool.getPool(nodeID) - if !ok { - factory := &connFactory{nodeID: nodeID, clientPool: w.pool, timeout: w.timeout} - factory.metaStore = w.MetaStore - - p, err := pool.NewChannelPool(1, 3, factory.dial) - if err != nil { - return nil, err - } - w.pool.setPool(nodeID, p) - } - return w.pool.conn(nodeID) -} - -// Close closes ShardWriter's pool -func (w *ShardWriter) Close() error { - if w.pool == nil { - return fmt.Errorf("client already closed") - } - w.pool.close() - w.pool = nil - return nil -} - -const ( - maxConnections = 500 - maxRetries = 3 -) - -var errMaxConnectionsExceeded = fmt.Errorf("can not exceed max connections of %d", maxConnections) - -type connFactory struct { - nodeID uint64 - timeout time.Duration - - clientPool interface { - size() int - } - - metaStore interface { - Node(id uint64) (ni *meta.NodeInfo, err error) - } -} - -func (c *connFactory) dial() (net.Conn, error) { - if c.clientPool.size() > maxConnections { - return nil, errMaxConnectionsExceeded - } - - ni, err := c.metaStore.Node(c.nodeID) - if err != nil { - return nil, err - } - - if ni == nil { - return nil, fmt.Errorf("node %d does not exist", c.nodeID) - } - - conn, err := net.DialTimeout("tcp", ni.Host, c.timeout) - if err != nil { - return nil, err - } - - // Write a marker byte for cluster messages. - _, err = conn.Write([]byte{MuxHeader}) - if err != nil { - conn.Close() - return nil, err - } - - return conn, nil -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer_test.go deleted file mode 100644 index 671342349..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer_test.go +++ /dev/null @@ -1,186 +0,0 @@ -package cluster_test - -import ( - "net" - "strings" - "testing" - "time" - - "github.com/influxdb/influxdb/cluster" - "github.com/influxdb/influxdb/models" -) - -// Ensure the shard writer can successful write a single request. 
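WriteShard above refreshes the deadline before each write and each read, and marks the pooled connection unusable on any I/O error so it is discarded rather than returned to the pool. The deadline-per-operation shape against a plain net.Conn, demonstrated over net.Pipe (roundTrip is a sketch, not the file's code; the pooled version calls MarkUnusable where this one merely returns the error):

package main

import (
	"fmt"
	"net"
	"time"
)

// roundTrip shows the deadline discipline used by WriteShard: refresh
// the deadline before every write and read, and surface any I/O error
// so the caller knows the connection must not be reused.
func roundTrip(conn net.Conn, timeout time.Duration, req []byte) ([]byte, error) {
	conn.SetWriteDeadline(time.Now().Add(timeout))
	if _, err := conn.Write(req); err != nil {
		return nil, err // caller must discard conn
	}
	conn.SetReadDeadline(time.Now().Add(timeout))
	resp := make([]byte, 1024)
	n, err := conn.Read(resp)
	if err != nil {
		return nil, err
	}
	return resp[:n], nil
}

func main() {
	client, server := net.Pipe()
	go func() {
		buf := make([]byte, 4)
		server.Read(buf)
		server.Write(buf) // echo the request back
		server.Close()
	}()
	resp, err := roundTrip(client, time.Second, []byte("ping"))
	fmt.Println(string(resp), err) // ping <nil>
}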
-func TestShardWriter_WriteShard_Success(t *testing.T) { - ts := newTestWriteService(writeShardSuccess) - s := cluster.NewService(cluster.Config{}) - s.Listener = ts.muxln - s.TSDBStore = ts - if err := s.Open(); err != nil { - t.Fatal(err) - } - defer s.Close() - defer ts.Close() - - w := cluster.NewShardWriter(time.Minute) - w.MetaStore = &metaStore{host: ts.ln.Addr().String()} - - // Build a single point. - now := time.Now() - var points []models.Point - points = append(points, models.MustNewPoint("cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now)) - - // Write to shard and close. - if err := w.WriteShard(1, 2, points); err != nil { - t.Fatal(err) - } else if err := w.Close(); err != nil { - t.Fatal(err) - } - - // Validate response. - responses, err := ts.ResponseN(1) - if err != nil { - t.Fatal(err) - } else if responses[0].shardID != 1 { - t.Fatalf("unexpected shard id: %d", responses[0].shardID) - } - - // Validate point. - if p := responses[0].points[0]; p.Name() != "cpu" { - t.Fatalf("unexpected name: %s", p.Name()) - } else if p.Fields()["value"] != int64(100) { - t.Fatalf("unexpected 'value' field: %d", p.Fields()["value"]) - } else if p.Tags()["host"] != "server01" { - t.Fatalf("unexpected 'host' tag: %s", p.Tags()["host"]) - } else if p.Time().UnixNano() != now.UnixNano() { - t.Fatalf("unexpected time: %s", p.Time()) - } -} - -// Ensure the shard writer can successful write a multiple requests. -func TestShardWriter_WriteShard_Multiple(t *testing.T) { - ts := newTestWriteService(writeShardSuccess) - s := cluster.NewService(cluster.Config{}) - s.Listener = ts.muxln - s.TSDBStore = ts - if err := s.Open(); err != nil { - t.Fatal(err) - } - defer s.Close() - defer ts.Close() - - w := cluster.NewShardWriter(time.Minute) - w.MetaStore = &metaStore{host: ts.ln.Addr().String()} - - // Build a single point. - now := time.Now() - var points []models.Point - points = append(points, models.MustNewPoint("cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now)) - - // Write to shard twice and close. - if err := w.WriteShard(1, 2, points); err != nil { - t.Fatal(err) - } else if err := w.WriteShard(1, 2, points); err != nil { - t.Fatal(err) - } else if err := w.Close(); err != nil { - t.Fatal(err) - } - - // Validate response. - responses, err := ts.ResponseN(1) - if err != nil { - t.Fatal(err) - } else if responses[0].shardID != 1 { - t.Fatalf("unexpected shard id: %d", responses[0].shardID) - } - - // Validate point. - if p := responses[0].points[0]; p.Name() != "cpu" { - t.Fatalf("unexpected name: %s", p.Name()) - } else if p.Fields()["value"] != int64(100) { - t.Fatalf("unexpected 'value' field: %d", p.Fields()["value"]) - } else if p.Tags()["host"] != "server01" { - t.Fatalf("unexpected 'host' tag: %s", p.Tags()["host"]) - } else if p.Time().UnixNano() != now.UnixNano() { - t.Fatalf("unexpected time: %s", p.Time()) - } -} - -// Ensure the shard writer returns an error when the server fails to accept the write. 
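These tests pin down the error envelope end to end: the service folds a handler failure into a response with code 1 plus the error text, and the writer turns any nonzero code back into an error, which is why the error test that follows expects the literal string "error code 1: write shard 1: failed to write". The envelope restated minimally (response, encode, and decode are illustrative names):

package main

import (
	"errors"
	"fmt"
)

// response mirrors the code/message pair carried by WriteShardResponse.
type response struct {
	code    int
	message string
}

// encode is what the server side does in writeShardResponse:
// a nil error becomes code 0, anything else becomes code 1 + text.
func encode(err error) response {
	if err != nil {
		return response{code: 1, message: err.Error()}
	}
	return response{code: 0}
}

// decode is what the client side does: a nonzero code is an error again.
func decode(r response) error {
	if r.code != 0 {
		return fmt.Errorf("error code %d: %s", r.code, r.message)
	}
	return nil
}

func main() {
	err := decode(encode(errors.New("write shard 1: failed to write")))
	fmt.Println(err) // error code 1: write shard 1: failed to write
}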
-func TestShardWriter_WriteShard_Error(t *testing.T) { - ts := newTestWriteService(writeShardFail) - s := cluster.NewService(cluster.Config{}) - s.Listener = ts.muxln - s.TSDBStore = ts - if err := s.Open(); err != nil { - t.Fatal(err) - } - defer s.Close() - defer ts.Close() - - w := cluster.NewShardWriter(time.Minute) - w.MetaStore = &metaStore{host: ts.ln.Addr().String()} - now := time.Now() - - shardID := uint64(1) - ownerID := uint64(2) - var points []models.Point - points = append(points, models.MustNewPoint( - "cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now, - )) - - if err := w.WriteShard(shardID, ownerID, points); err == nil || err.Error() != "error code 1: write shard 1: failed to write" { - t.Fatalf("unexpected error: %v", err) - } -} - -// Ensure the shard writer returns an error when dialing times out. -func TestShardWriter_Write_ErrDialTimeout(t *testing.T) { - ts := newTestWriteService(writeShardSuccess) - s := cluster.NewService(cluster.Config{}) - s.Listener = ts.muxln - s.TSDBStore = ts - if err := s.Open(); err != nil { - t.Fatal(err) - } - defer s.Close() - defer ts.Close() - - w := cluster.NewShardWriter(time.Nanosecond) - w.MetaStore = &metaStore{host: ts.ln.Addr().String()} - now := time.Now() - - shardID := uint64(1) - ownerID := uint64(2) - var points []models.Point - points = append(points, models.MustNewPoint( - "cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now, - )) - - if err, exp := w.WriteShard(shardID, ownerID, points), "i/o timeout"; err == nil || !strings.Contains(err.Error(), exp) { - t.Fatalf("expected error %v, to contain %s", err, exp) - } -} - -// Ensure the shard writer returns an error when reading times out. -func TestShardWriter_Write_ErrReadTimeout(t *testing.T) { - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal(err) - } - - w := cluster.NewShardWriter(time.Millisecond) - w.MetaStore = &metaStore{host: ln.Addr().String()} - now := time.Now() - - shardID := uint64(1) - ownerID := uint64(2) - var points []models.Point - points = append(points, models.MustNewPoint( - "cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now, - )) - - if err := w.WriteShard(shardID, ownerID, points); err == nil || !strings.Contains(err.Error(), "i/o timeout") { - t.Fatalf("unexpected error: %s", err) - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/cli/cli.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/cli/cli.go deleted file mode 100644 index 4ce49cf5d..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/cli/cli.go +++ /dev/null @@ -1,757 +0,0 @@ -package cli - -import ( - "encoding/csv" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net" - "net/url" - "os" - "os/user" - "path/filepath" - "sort" - "strconv" - "strings" - "text/tabwriter" - - "github.com/influxdb/influxdb/client" - "github.com/influxdb/influxdb/cluster" - "github.com/influxdb/influxdb/importer/v8" - "github.com/peterh/liner" -) - -const ( - noTokenMsg = "Visit https://enterprise.influxdata.com to register for updates, InfluxDB server management, and monitoring.\n" -) - -type CommandLine struct { - Client *client.Client - Line *liner.State - Host string - Port int - Username string - Password string - Database string - Ssl bool - RetentionPolicy string - ClientVersion string - ServerVersion string - Pretty bool // controls pretty print for json - Format string // controls the output 
format. Valid values are json, csv, or column - Precision string - WriteConsistency string - Execute string - ShowVersion bool - Import bool - PPS int // Controls how many points per second the import will allow via throttling - Path string - Compressed bool -} - -func New(version string) *CommandLine { - return &CommandLine{ClientVersion: version} -} - -func (c *CommandLine) Run() { - var promptForPassword bool - // determine if they set the password flag but provided no value - for _, v := range os.Args { - v = strings.ToLower(v) - if (strings.HasPrefix(v, "-password") || strings.HasPrefix(v, "--password")) && c.Password == "" { - promptForPassword = true - break - } - } - - c.Line = liner.NewLiner() - defer c.Line.Close() - - if promptForPassword { - p, e := c.Line.PasswordPrompt("password: ") - if e != nil { - fmt.Println("Unable to parse password.") - } else { - c.Password = p - } - } - - if err := c.Connect(""); err != nil { - fmt.Fprintf(os.Stderr, - "Failed to connect to %s\nPlease check your connection settings and ensure 'influxd' is running.\n", - c.Client.Addr()) - return - } - - if c.Execute == "" && !c.Import { - token, err := c.DatabaseToken() - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to check token: %s\n", err.Error()) - return - } - if token == "" { - fmt.Printf(noTokenMsg) - } - fmt.Printf("Connected to %s version %s\n", c.Client.Addr(), c.ServerVersion) - } - - if c.Execute != "" { - // Modify precision before executing query - c.SetPrecision(c.Precision) - if err := c.ExecuteQuery(c.Execute); err != nil { - c.Line.Close() - os.Exit(1) - } - c.Line.Close() - os.Exit(0) - } - - if c.Import { - path := net.JoinHostPort(c.Host, strconv.Itoa(c.Port)) - u, e := client.ParseConnectionString(path, c.Ssl) - if e != nil { - fmt.Println(e) - return - } - - config := v8.NewConfig() - config.Username = c.Username - config.Password = c.Password - config.Precision = "ns" - config.WriteConsistency = "any" - config.Path = c.Path - config.Version = c.ClientVersion - config.URL = u - config.Compressed = c.Compressed - config.PPS = c.PPS - config.Precision = c.Precision - - i := v8.NewImporter(config) - if err := i.Import(); err != nil { - fmt.Printf("ERROR: %s\n", err) - c.Line.Close() - os.Exit(1) - } - c.Line.Close() - os.Exit(0) - } - - c.Version() - - var historyFile string - usr, err := user.Current() - // Only load history if we can get the user - if err == nil { - historyFile = filepath.Join(usr.HomeDir, ".influx_history") - - if f, err := os.Open(historyFile); err == nil { - c.Line.ReadHistory(f) - f.Close() - } - } - - for { - l, e := c.Line.Prompt("> ") - if e != nil { - break - } - if c.ParseCommand(l) { - // write out the history - if len(historyFile) > 0 { - c.Line.AppendHistory(l) - if f, err := os.Create(historyFile); err == nil { - c.Line.WriteHistory(f) - f.Close() - } - } - } else { - break // exit main loop - } - } -} - -func (c *CommandLine) ParseCommand(cmd string) bool { - lcmd := strings.TrimSpace(strings.ToLower(cmd)) - - split := strings.Split(lcmd, " ") - var tokens []string - for _, token := range split { - if token != "" { - tokens = append(tokens, token) - } - } - - if len(tokens) > 0 { - switch tokens[0] { - case "": - break - case "exit": - // signal the program to exit - return false - case "gopher": - c.gopher() - case "connect": - c.Connect(cmd) - case "auth": - c.SetAuth(cmd) - case "help": - c.help() - case "history": - c.history() - case "format": - c.SetFormat(cmd) - case "precision": - c.SetPrecision(cmd) - case "consistency": - 
c.SetWriteConsistency(cmd) - case "settings": - c.Settings() - case "pretty": - c.Pretty = !c.Pretty - if c.Pretty { - fmt.Println("Pretty print enabled") - } else { - fmt.Println("Pretty print disabled") - } - case "use": - c.use(cmd) - case "insert": - c.Insert(cmd) - default: - c.ExecuteQuery(cmd) - } - } - return true -} - -// Connect connects client to a server -func (c *CommandLine) Connect(cmd string) error { - var cl *client.Client - var u url.URL - - // Remove the "connect" keyword if it exists - path := strings.TrimSpace(strings.Replace(cmd, "connect", "", -1)) - - // If they didn't provide a connection string, use the current settings - if path == "" { - path = net.JoinHostPort(c.Host, strconv.Itoa(c.Port)) - } - - var e error - u, e = client.ParseConnectionString(path, c.Ssl) - if e != nil { - return e - } - - config := client.NewConfig() - config.URL = u - config.Username = c.Username - config.Password = c.Password - config.UserAgent = "InfluxDBShell/" + c.ClientVersion - config.Precision = c.Precision - cl, err := client.NewClient(config) - if err != nil { - return fmt.Errorf("Could not create client %s", err) - } - c.Client = cl - if _, v, e := c.Client.Ping(); e != nil { - return fmt.Errorf("Failed to connect to %s\n", c.Client.Addr()) - } else { - c.ServerVersion = v - } - - _, c.ServerVersion, _ = c.Client.Ping() - - return nil -} - -func (c *CommandLine) SetAuth(cmd string) { - // If they pass in the entire command, we should parse it - // auth - args := strings.Fields(cmd) - if len(args) == 3 { - args = args[1:] - } else { - args = []string{} - } - - if len(args) == 2 { - c.Username = args[0] - c.Password = args[1] - } else { - u, e := c.Line.Prompt("username: ") - if e != nil { - fmt.Printf("Unable to process input: %s", e) - return - } - c.Username = strings.TrimSpace(u) - p, e := c.Line.PasswordPrompt("password: ") - if e != nil { - fmt.Printf("Unable to process input: %s", e) - return - } - c.Password = p - } - - // Update the client as well - c.Client.SetAuth(c.Username, c.Password) -} - -func (c *CommandLine) use(cmd string) { - args := strings.Split(strings.TrimSuffix(strings.TrimSpace(cmd), ";"), " ") - if len(args) != 2 { - fmt.Printf("Could not parse database name from %q.\n", cmd) - return - } - d := args[1] - c.Database = d - fmt.Printf("Using database %s\n", d) -} - -func (c *CommandLine) SetPrecision(cmd string) { - // Remove the "precision" keyword if it exists - cmd = strings.TrimSpace(strings.Replace(cmd, "precision", "", -1)) - // normalize cmd - cmd = strings.ToLower(cmd) - - switch cmd { - case "h", "m", "s", "ms", "u", "ns": - c.Precision = cmd - c.Client.SetPrecision(c.Precision) - case "rfc3339": - c.Precision = "" - c.Client.SetPrecision(c.Precision) - default: - fmt.Printf("Unknown precision %q. Please use rfc3339, h, m, s, ms, u or ns.\n", cmd) - } -} - -func (c *CommandLine) SetFormat(cmd string) { - // Remove the "format" keyword if it exists - cmd = strings.TrimSpace(strings.Replace(cmd, "format", "", -1)) - // normalize cmd - cmd = strings.ToLower(cmd) - - switch cmd { - case "json", "csv", "column": - c.Format = cmd - default: - fmt.Printf("Unknown format %q. 
Please use json, csv, or column.\n", cmd) - } -} - -func (c *CommandLine) SetWriteConsistency(cmd string) { - // Remove the "consistency" keyword if it exists - cmd = strings.TrimSpace(strings.Replace(cmd, "consistency", "", -1)) - // normalize cmd - cmd = strings.ToLower(cmd) - - _, err := cluster.ParseConsistencyLevel(cmd) - if err != nil { - fmt.Printf("Unknown consistency level %q. Please use any, one, quorum, or all.\n", cmd) - return - } - c.WriteConsistency = cmd -} - -// isWhitespace returns true if the rune is a space, tab, or newline. -func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' } - -// isLetter returns true if the rune is a letter. -func isLetter(ch rune) bool { return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') } - -// isDigit returns true if the rune is a digit. -func isDigit(ch rune) bool { return (ch >= '0' && ch <= '9') } - -// isIdentFirstChar returns true if the rune can be used as the first char in an unquoted identifer. -func isIdentFirstChar(ch rune) bool { return isLetter(ch) || ch == '_' } - -// isIdentChar returns true if the rune can be used in an unquoted identifier. -func isNotIdentChar(ch rune) bool { return !(isLetter(ch) || isDigit(ch) || ch == '_') } - -func parseUnquotedIdentifier(stmt string) (string, string) { - if fields := strings.FieldsFunc(stmt, isNotIdentChar); len(fields) > 0 { - return fields[0], strings.TrimPrefix(stmt, fields[0]) - } - return "", stmt -} - -func parseDoubleQuotedIdentifier(stmt string) (string, string) { - escapeNext := false - fields := strings.FieldsFunc(stmt, func(ch rune) bool { - if ch == '\\' { - escapeNext = true - } else if ch == '"' { - if !escapeNext { - return true - } - escapeNext = false - } - return false - }) - if len(fields) > 0 { - return fields[0], strings.TrimPrefix(stmt, "\""+fields[0]+"\"") - } - return "", stmt -} - -func parseNextIdentifier(stmt string) (ident, remainder string) { - if len(stmt) > 0 { - switch { - case isWhitespace(rune(stmt[0])): - return parseNextIdentifier(stmt[1:]) - case isIdentFirstChar(rune(stmt[0])): - return parseUnquotedIdentifier(stmt) - case stmt[0] == '"': - return parseDoubleQuotedIdentifier(stmt) - } - } - return "", stmt -} - -func (c *CommandLine) parseInto(stmt string) string { - ident, stmt := parseNextIdentifier(stmt) - if strings.HasPrefix(stmt, ".") { - c.Database = ident - fmt.Printf("Using database %s\n", c.Database) - ident, stmt = parseNextIdentifier(stmt[1:]) - } - if strings.HasPrefix(stmt, " ") { - c.RetentionPolicy = ident - fmt.Printf("Using retention policy %s\n", c.RetentionPolicy) - return stmt[1:] - } - return stmt -} - -func (c *CommandLine) Insert(stmt string) error { - i, point := parseNextIdentifier(stmt) - if !strings.EqualFold(i, "insert") { - fmt.Printf("ERR: found %s, expected INSERT\n", i) - return nil - } - if i, r := parseNextIdentifier(point); strings.EqualFold(i, "into") { - point = c.parseInto(r) - } - _, err := c.Client.Write(client.BatchPoints{ - Points: []client.Point{ - client.Point{Raw: point}, - }, - Database: c.Database, - RetentionPolicy: c.RetentionPolicy, - Precision: "n", - WriteConsistency: c.WriteConsistency, - }) - if err != nil { - fmt.Printf("ERR: %s\n", err) - if c.Database == "" { - fmt.Println("Note: error may be due to not setting a database or retention policy.") - fmt.Println(`Please set a database with the command "use " or`) - fmt.Println("INSERT INTO . 
") - } - return err - } - return nil -} - -func (c *CommandLine) ExecuteQuery(query string) error { - response, err := c.Client.Query(client.Query{Command: query, Database: c.Database}) - if err != nil { - fmt.Printf("ERR: %s\n", err) - return err - } - c.FormatResponse(response, os.Stdout) - if err := response.Error(); err != nil { - fmt.Printf("ERR: %s\n", response.Error()) - if c.Database == "" { - fmt.Println("Warning: It is possible this error is due to not setting a database.") - fmt.Println(`Please set a database with the command "use ".`) - } - return err - } - return nil -} - -func (c *CommandLine) DatabaseToken() (string, error) { - response, err := c.Client.Query(client.Query{Command: "SHOW DIAGNOSTICS for 'registration'"}) - if err != nil { - return "", err - } - if response.Error() != nil || len((*response).Results[0].Series) == 0 { - return "", nil - } - - // Look for position of "token" column. - for i, s := range (*response).Results[0].Series[0].Columns { - if s == "token" { - return (*response).Results[0].Series[0].Values[0][i].(string), nil - } - } - return "", nil -} - -func (c *CommandLine) FormatResponse(response *client.Response, w io.Writer) { - switch c.Format { - case "json": - c.writeJSON(response, w) - case "csv": - c.writeCSV(response, w) - case "column": - c.writeColumns(response, w) - default: - fmt.Fprintf(w, "Unknown output format %q.\n", c.Format) - } -} - -func (c *CommandLine) writeJSON(response *client.Response, w io.Writer) { - var data []byte - var err error - if c.Pretty { - data, err = json.MarshalIndent(response, "", " ") - } else { - data, err = json.Marshal(response) - } - if err != nil { - fmt.Fprintf(w, "Unable to parse json: %s\n", err) - return - } - fmt.Fprintln(w, string(data)) -} - -func (c *CommandLine) writeCSV(response *client.Response, w io.Writer) { - csvw := csv.NewWriter(w) - for _, result := range response.Results { - // Create a tabbed writer for each result as they won't always line up - rows := c.formatResults(result, "\t") - for _, r := range rows { - csvw.Write(strings.Split(r, "\t")) - } - csvw.Flush() - } -} - -func (c *CommandLine) writeColumns(response *client.Response, w io.Writer) { - for _, result := range response.Results { - // Create a tabbed writer for each result a they won't always line up - w := new(tabwriter.Writer) - w.Init(os.Stdout, 0, 8, 1, '\t', 0) - csv := c.formatResults(result, "\t") - for _, r := range csv { - fmt.Fprintln(w, r) - } - w.Flush() - } -} - -// formatResults will behave differently if you are formatting for columns or csv -func (c *CommandLine) formatResults(result client.Result, separator string) []string { - rows := []string{} - // Create a tabbed writer for each result a they won't always line up - for i, row := range result.Series { - // gather tags - tags := []string{} - for k, v := range row.Tags { - tags = append(tags, fmt.Sprintf("%s=%s", k, v)) - sort.Strings(tags) - } - - columnNames := []string{} - - // Only put name/tags in a column if format is csv - if c.Format == "csv" { - if len(tags) > 0 { - columnNames = append([]string{"tags"}, columnNames...) - } - - if row.Name != "" { - columnNames = append([]string{"name"}, columnNames...) 
- } - } - - for _, column := range row.Columns { - columnNames = append(columnNames, column) - } - - // Output a line separator if we have more than one set or results and format is column - if i > 0 && c.Format == "column" { - rows = append(rows, "") - } - - // If we are column format, we break out the name/tag to seperate lines - if c.Format == "column" { - if row.Name != "" { - n := fmt.Sprintf("name: %s", row.Name) - rows = append(rows, n) - if len(tags) == 0 { - l := strings.Repeat("-", len(n)) - rows = append(rows, l) - } - } - if len(tags) > 0 { - t := fmt.Sprintf("tags: %s", (strings.Join(tags, ", "))) - rows = append(rows, t) - } - } - - rows = append(rows, strings.Join(columnNames, separator)) - - // if format is column, break tags to their own line/format - if c.Format == "column" && len(tags) > 0 { - lines := []string{} - for _, columnName := range columnNames { - lines = append(lines, strings.Repeat("-", len(columnName))) - } - rows = append(rows, strings.Join(lines, separator)) - } - - for _, v := range row.Values { - var values []string - if c.Format == "csv" { - if row.Name != "" { - values = append(values, row.Name) - } - if len(tags) > 0 { - values = append(values, strings.Join(tags, ",")) - } - } - - for _, vv := range v { - values = append(values, interfaceToString(vv)) - } - rows = append(rows, strings.Join(values, separator)) - } - // Outout a line separator if in column format - if c.Format == "column" { - rows = append(rows, "") - } - } - return rows -} - -func interfaceToString(v interface{}) string { - switch t := v.(type) { - case nil: - return "" - case bool: - return fmt.Sprintf("%v", v) - case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr: - return fmt.Sprintf("%d", t) - case float32, float64: - return fmt.Sprintf("%v", t) - default: - return fmt.Sprintf("%v", t) - } -} - -func (c *CommandLine) Settings() { - w := new(tabwriter.Writer) - w.Init(os.Stdout, 0, 8, 1, '\t', 0) - if c.Port > 0 { - fmt.Fprintf(w, "Host\t%s:%d\n", c.Host, c.Port) - } else { - fmt.Fprintf(w, "Host\t%s\n", c.Host) - } - fmt.Fprintf(w, "Username\t%s\n", c.Username) - fmt.Fprintf(w, "Database\t%s\n", c.Database) - fmt.Fprintf(w, "Pretty\t%v\n", c.Pretty) - fmt.Fprintf(w, "Format\t%s\n", c.Format) - fmt.Fprintf(w, "Write Consistency\t%s\n", c.WriteConsistency) - fmt.Fprintln(w) - w.Flush() -} - -func (c *CommandLine) help() { - fmt.Println(`Usage: - connect connect to another node - auth prompt for username and password - pretty toggle pretty print - use set current databases - format set the output format: json, csv, or column - precision set the timestamp format: h,m,s,ms,u,ns - consistency set write consistency level: any, one, quorum, or all - settings output the current settings for the shell - exit quit the influx shell - - show databases show database names - show series show series information - show measurements show measurement information - show tag keys show tag key information - show tag values show tag value information - - a full list of influxql commands can be found at: - https://influxdb.com/docs/v0.9/query_language/spec.html -`) -} - -func (c *CommandLine) history() { - usr, err := user.Current() - // Only load history if we can get the user - if err == nil { - historyFile := filepath.Join(usr.HomeDir, ".influx_history") - if history, err := ioutil.ReadFile(historyFile); err == nil { - fmt.Print(string(history)) - } - } -} - -func (c *CommandLine) gopher() { - fmt.Println(` - .-::-::://:-::- .:/++/' - '://:-''/oo+//++o+/.://o- ./+: - .:-. 
'++- .o/ '+yydhy' o- - .:/. .h: :osoys .smMN- :/ - -/:.' s- /MMMymh. '/y/ s' - -+s:'''' d -mMMms// '-/o: - -/++/++/////:. o: '... s- :s. - :+-+s-' ':/' 's- /+ 'o: - '+-'o: /ydhsh. '//. '-o- o- - .y. o: .MMMdm+y ':+++:::/+:.' s: - .-h/ y- 'sdmds'h -+ydds:::-.' 'h. - .//-.d' o: '.' 'dsNMMMNh:.:++' :y - +y. 'd 's. .s:mddds: ++ o/ - 'N- odd 'o/. './o-s-' .---+++' o- - 'N' yNd .://:/:::::. -s -+/s/./s' 'o/' - so' .h '''' ////s: '+. .s +y' - os/-.y' 's' 'y::+ +d' - '.:o/ -+:-:.' so.---.' - o' 'd-.''/s' - .s' :y.''.y - -s mo:::' - :: yh - // '''' /M' - o+ .s///:/. 'N: - :+ /: -s' ho - 's- -/s/:+/.+h' +h - ys' ':' '-. -d - oh .h - /o .s - s. .h - -y .d - m/ -h - +d /o - 'N- y: - h: m. - s- -d - o- s+ - +- 'm' - s/ oo--. - y- /s ':+' - s' 'od--' .d: - -+ ':o: ':+-/+ - y- .:+- ' - //o- '.:+/. - .-:+/' ''-/+/. - ./:' ''.:o+/-' - .+o:/:/+-' ''.-+ooo/-' - o: -h///++////-. - /: .o/ - //+ 'y - ./sooy. - -`) -} - -func (c *CommandLine) Version() { - fmt.Println("InfluxDB shell " + c.ClientVersion) -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/cli/cli_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/cli/cli_test.go deleted file mode 100644 index 00755d123..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/cli/cli_test.go +++ /dev/null @@ -1,238 +0,0 @@ -package cli_test - -import ( - "encoding/json" - "net/http" - "net/http/httptest" - "net/url" - "testing" - - "github.com/influxdb/influxdb/client" - "github.com/influxdb/influxdb/cmd/influx/cli" -) - -func TestParseCommand_CommandsExist(t *testing.T) { - t.Parallel() - c := cli.CommandLine{} - tests := []struct { - cmd string - }{ - {cmd: "gopher"}, - {cmd: "connect"}, - {cmd: "help"}, - {cmd: "pretty"}, - {cmd: "use"}, - {cmd: ""}, // test that a blank command just returns - } - for _, test := range tests { - if !c.ParseCommand(test.cmd) { - t.Fatalf(`Command failed for %q.`, test.cmd) - } - } -} - -func TestParseCommand_TogglePretty(t *testing.T) { - t.Parallel() - c := cli.CommandLine{} - if c.Pretty { - t.Fatalf(`Pretty should be false.`) - } - c.ParseCommand("pretty") - if !c.Pretty { - t.Fatalf(`Pretty should be true.`) - } - c.ParseCommand("pretty") - if c.Pretty { - t.Fatalf(`Pretty should be false.`) - } -} - -func TestParseCommand_Exit(t *testing.T) { - t.Parallel() - c := cli.CommandLine{} - tests := []struct { - cmd string - }{ - {cmd: "exit"}, - {cmd: " exit"}, - {cmd: "exit "}, - {cmd: "Exit "}, - } - - for _, test := range tests { - if c.ParseCommand(test.cmd) { - t.Fatalf(`Command "exit" failed for %q.`, test.cmd) - } - } -} - -func TestParseCommand_Use(t *testing.T) { - t.Parallel() - c := cli.CommandLine{} - tests := []struct { - cmd string - }{ - {cmd: "use db"}, - {cmd: " use db"}, - {cmd: "use db "}, - {cmd: "use db;"}, - {cmd: "use db; "}, - {cmd: "Use db"}, - } - - for _, test := range tests { - if !c.ParseCommand(test.cmd) { - t.Fatalf(`Command "use" failed for %q.`, test.cmd) - } - - if c.Database != "db" { - t.Fatalf(`Command "use" changed database to %q. 
Expected db`, c.Database) - } - } -} - -func TestParseCommand_Consistency(t *testing.T) { - t.Parallel() - c := cli.CommandLine{} - tests := []struct { - cmd string - }{ - {cmd: "consistency one"}, - {cmd: " consistency one"}, - {cmd: "consistency one "}, - {cmd: "consistency one;"}, - {cmd: "consistency one; "}, - {cmd: "Consistency one"}, - } - - for _, test := range tests { - if !c.ParseCommand(test.cmd) { - t.Fatalf(`Command "consistency" failed for %q.`, test.cmd) - } - - if c.WriteConsistency != "one" { - t.Fatalf(`Command "consistency" changed consistency to %q. Expected one`, c.WriteConsistency) - } - } -} - -func TestParseCommand_Insert(t *testing.T) { - t.Parallel() - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var data client.Response - w.WriteHeader(http.StatusNoContent) - _ = json.NewEncoder(w).Encode(data) - })) - defer ts.Close() - - u, _ := url.Parse(ts.URL) - config := client.Config{URL: *u} - c, err := client.NewClient(config) - if err != nil { - t.Fatalf("unexpected error. expected %v, actual %v", nil, err) - } - m := cli.CommandLine{Client: c} - - tests := []struct { - cmd string - }{ - {cmd: "INSERT cpu,host=serverA,region=us-west value=1.0"}, - {cmd: " INSERT cpu,host=serverA,region=us-west value=1.0"}, - {cmd: "INSERT cpu,host=serverA,region=us-west value=1.0"}, - {cmd: "insert cpu,host=serverA,region=us-west value=1.0 "}, - {cmd: "insert"}, - {cmd: "Insert "}, - {cmd: "insert c"}, - {cmd: "insert int"}, - } - - for _, test := range tests { - if !m.ParseCommand(test.cmd) { - t.Fatalf(`Command "insert" failed for %q.`, test.cmd) - } - } -} - -func TestParseCommand_InsertInto(t *testing.T) { - t.Parallel() - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var data client.Response - w.WriteHeader(http.StatusNoContent) - _ = json.NewEncoder(w).Encode(data) - })) - defer ts.Close() - - u, _ := url.Parse(ts.URL) - config := client.Config{URL: *u} - c, err := client.NewClient(config) - if err != nil { - t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) - } - m := cli.CommandLine{Client: c} - - tests := []struct { - cmd, db, rp string - }{ - { - cmd: `INSERT INTO test cpu,host=serverA,region=us-west value=1.0`, - db: "", - rp: "test", - }, - { - cmd: ` INSERT INTO .test cpu,host=serverA,region=us-west value=1.0`, - db: "", - rp: "test", - }, - { - cmd: `INSERT INTO "test test" cpu,host=serverA,region=us-west value=1.0`, - db: "", - rp: "test test", - }, - { - cmd: `Insert iNTO test.test cpu,host=serverA,region=us-west value=1.0`, - db: "test", - rp: "test", - }, - { - cmd: `insert into "test test" cpu,host=serverA,region=us-west value=1.0`, - db: "test", - rp: "test test", - }, - { - cmd: `insert into "d b"."test test" cpu,host=serverA,region=us-west value=1.0`, - db: "d b", - rp: "test test", - }, - } - - for _, test := range tests { - if !m.ParseCommand(test.cmd) { - t.Fatalf(`Command "insert into" failed for %q.`, test.cmd) - } - if m.Database != test.db { - t.Fatalf(`Command "insert into" db parsing failed, expected: %q, actual: %q`, test.db, m.Database) - } - if m.RetentionPolicy != test.rp { - t.Fatalf(`Command "insert into" rp parsing failed, expected: %q, actual: %q`, test.rp, m.RetentionPolicy) - } - } -} - -func TestParseCommand_History(t *testing.T) { - t.Parallel() - c := cli.CommandLine{} - tests := []struct { - cmd string - }{ - {cmd: "history"}, - {cmd: " history"}, - {cmd: "history "}, - {cmd: "History "}, - } - - for _, test := range tests { - if !c.ParseCommand(test.cmd) { - t.Fatalf(`Command "history" failed for %q.`, test.cmd) - } - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/main.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/main.go deleted file mode 100644 index debaccb17..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/main.go +++ /dev/null @@ -1,103 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "os" - - "github.com/influxdb/influxdb/client" - "github.com/influxdb/influxdb/cmd/influx/cli" -) - -// These variables are populated via the Go linker. -var ( - version = "0.9" -) - -const ( - // defaultFormat is the default format of the results when issuing queries - defaultFormat = "column" - - // defaultPrecision is the default timestamp format of the results when issuing queries - defaultPrecision = "ns" - - // defaultPPS is the default points per second that the import will throttle at - // by default it's 0, which means it will not throttle - defaultPPS = 0 -) - -func main() { - c := cli.New(version) - - fs := flag.NewFlagSet("InfluxDB shell version "+version, flag.ExitOnError) - fs.StringVar(&c.Host, "host", client.DefaultHost, "Influxdb host to connect to.") - fs.IntVar(&c.Port, "port", client.DefaultPort, "Influxdb port to connect to.") - fs.StringVar(&c.Username, "username", c.Username, "Username to connect to the server.") - fs.StringVar(&c.Password, "password", c.Password, `Password to connect to the server. 
Leaving blank will prompt for password (--password="").`) - fs.StringVar(&c.Database, "database", c.Database, "Database to connect to the server.") - fs.BoolVar(&c.Ssl, "ssl", false, "Use https for connecting to cluster.") - fs.StringVar(&c.Format, "format", defaultFormat, "Format specifies the format of the server responses: json, csv, or column.") - fs.StringVar(&c.Precision, "precision", defaultPrecision, "Precision specifies the format of the timestamp: rfc3339,h,m,s,ms,u or ns.") - fs.StringVar(&c.WriteConsistency, "consistency", "any", "Set write consistency level: any, one, quorum, or all.") - fs.BoolVar(&c.Pretty, "pretty", false, "Turns on pretty print for the json format.") - fs.StringVar(&c.Execute, "execute", c.Execute, "Execute command and quit.") - fs.BoolVar(&c.ShowVersion, "version", false, "Displays the InfluxDB version.") - fs.BoolVar(&c.Import, "import", false, "Import a previous database.") - fs.IntVar(&c.PPS, "pps", defaultPPS, "How many points per second the import will allow. By default it is zero and will not throttle importing.") - fs.StringVar(&c.Path, "path", "", "path to the file to import") - fs.BoolVar(&c.Compressed, "compressed", false, "set to true if the import file is compressed") - - // Define our own custom usage to print - fs.Usage = func() { - fmt.Println(`Usage of influx: - -version - Display the version and exit. - -host 'host name' - Host to connect to. - -port 'port #' - Port to connect to. - -database 'database name' - Database to connect to the server. - -password 'password' - Password to connect to the server. Leaving blank will prompt for password (--password ''). - -username 'username' - Username to connect to the server. - -ssl - Use https for requests. - -execute 'command' - Execute command and quit. - -format 'json|csv|column' - Format specifies the format of the server responses: json, csv, or column. - -precision 'rfc3339|h|m|s|ms|u|ns' - Precision specifies the format of the timestamp: rfc3339, h, m, s, ms, u or ns. - -consistency 'any|one|quorum|all' - Set write consistency level: any, one, quorum, or all - -pretty - Turns on pretty print for the json format. - -import - Import a previous database export from file - -pps - How many points per second the import will allow. By default it is zero and will not throttle importing. 
- -path - Path to file to import - -compressed - Set to true if the import file is compressed - -Examples: - - # Use influx in a non-interactive mode to query the database "metrics" and pretty print json: - $ influx -database 'metrics' -execute 'select * from cpu' -format 'json' -pretty - - # Connect to a specific database on startup and set database context: - $ influx -database 'metrics' -host 'localhost' -port '8086' -`) - } - fs.Parse(os.Args[1:]) - - if c.ShowVersion { - c.Version() - os.Exit(0) - } - - c.Run() -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_inspect/info.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_inspect/info.go deleted file mode 100644 index 381486b3e..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_inspect/info.go +++ /dev/null @@ -1,109 +0,0 @@ -package main - -import ( - "encoding/binary" - "fmt" - "io/ioutil" - "log" - "os" - "path/filepath" - "sort" - "strings" - "text/tabwriter" - - "github.com/influxdb/influxdb/tsdb" -) - -func cmdInfo(path string) { - tstore := tsdb.NewStore(filepath.Join(path, "data")) - tstore.Logger = log.New(ioutil.Discard, "", log.LstdFlags) - tstore.EngineOptions.Config.Dir = filepath.Join(path, "data") - tstore.EngineOptions.Config.WALLoggingEnabled = false - tstore.EngineOptions.Config.WALDir = filepath.Join(path, "wal") - if err := tstore.Open(); err != nil { - fmt.Printf("Failed to open dir: %v\n", err) - os.Exit(1) - } - - size, err := tstore.DiskSize() - if err != nil { - fmt.Printf("Failed to determine disk usage: %v\n", err) - } - - // Summary stats - fmt.Printf("Shards: %d, Indexes: %d, Databases: %d, Disk Size: %d, Series: %d\n\n", - tstore.ShardN(), tstore.DatabaseIndexN(), len(tstore.Databases()), size, countSeries(tstore)) - - tw := tabwriter.NewWriter(os.Stdout, 16, 8, 0, '\t', 0) - - fmt.Fprintln(tw, strings.Join([]string{"Shard", "DB", "Measurement", "Tags [#K/#V]", "Fields [Name:Type]", "Series"}, "\t")) - - shardIDs := tstore.ShardIDs() - - databases := tstore.Databases() - sort.Strings(databases) - - for _, db := range databases { - index := tstore.DatabaseIndex(db) - measurements := index.Measurements() - sort.Sort(measurements) - for _, m := range measurements { - tags := m.TagKeys() - tagValues := 0 - for _, tag := range tags { - tagValues += len(m.TagValues(tag)) - } - fields := m.FieldNames() - sort.Strings(fields) - series := m.SeriesKeys() - sort.Strings(series) - sort.Sort(ShardIDs(shardIDs)) - - // Sample a point from each measurement to determine the field types - for _, shardID := range shardIDs { - shard := tstore.Shard(shardID) - codec := shard.FieldCodec(m.Name) - for _, field := range codec.Fields() { - ft := fmt.Sprintf("%s:%s", field.Name, field.Type) - fmt.Fprintf(tw, "%d\t%s\t%s\t%d/%d\t%d [%s]\t%d\n", shardID, db, m.Name, len(tags), tagValues, - len(fields), ft, len(series)) - - } - - } - } - } - tw.Flush() -} - -func countSeries(tstore *tsdb.Store) int { - var count int - for _, shardID := range tstore.ShardIDs() { - shard := tstore.Shard(shardID) - cnt, err := shard.SeriesCount() - if err != nil { - fmt.Printf("series count failed: %v\n", err) - continue - } - count += cnt - } - return count -} - -func btou64(b []byte) uint64 { - return binary.BigEndian.Uint64(b) -} - -// u64tob converts a uint64 into an 8-byte slice. -func u64tob(v uint64) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, v) - return b -} - -// ShardIDs is a collection of UINT 64 that represent shard ids. 
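The btou64/u64tob helpers above and the sortable ShardIDs slice declared just below both lean on fixed-width big-endian encoding. A minimal standalone sketch (standard library only) of the round-trip, and of why that byte order is convenient when encoded keys must sort the same way as their numeric values:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// Round-trip a shard ID through the same 8-byte big-endian form
	// used by u64tob/btou64 above.
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, 42)
	fmt.Println(binary.BigEndian.Uint64(b)) // 42

	// Big-endian keys compare byte-wise in the same order as the
	// original integers, which is what an on-disk index wants.
	lo, hi := make([]byte, 8), make([]byte, 8)
	binary.BigEndian.PutUint64(lo, 1)
	binary.BigEndian.PutUint64(hi, 256)
	fmt.Println(bytes.Compare(lo, hi)) // -1
}
```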
-type ShardIDs []uint64 - -func (a ShardIDs) Len() int { return len(a) } -func (a ShardIDs) Less(i, j int) bool { return a[i] < a[j] } -func (a ShardIDs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_inspect/main.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_inspect/main.go deleted file mode 100644 index 647376b87..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_inspect/main.go +++ /dev/null @@ -1,87 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "os" - - _ "github.com/influxdb/influxdb/tsdb/engine" -) - -func usage() { - println(`Usage: influx_inspect [options] - -Displays detailed information about InfluxDB data files. -`) - - println(`Commands: - info - displays series meta-data for all shards. Default location [$HOME/.influxdb] - dumptsm - dumps low-level details about tsm1 files.`) - println() -} - -func main() { - - flag.Usage = usage - flag.Parse() - - if len(flag.Args()) == 0 { - flag.Usage() - os.Exit(0) - } - - switch flag.Args()[0] { - case "info": - var path string - fs := flag.NewFlagSet("info", flag.ExitOnError) - fs.StringVar(&path, "dir", os.Getenv("HOME")+"/.influxdb", "Root storage path. [$HOME/.influxdb]") - - fs.Usage = func() { - println("Usage: influx_inspect info [options]\n\n Displays series meta-data for all shards..") - println() - println("Options:") - fs.PrintDefaults() - } - - if err := fs.Parse(flag.Args()[1:]); err != nil { - fmt.Printf("%v", err) - os.Exit(1) - } - cmdInfo(path) - case "dumptsm": - var dumpAll bool - opts := &tsdmDumpOpts{} - fs := flag.NewFlagSet("file", flag.ExitOnError) - fs.BoolVar(&opts.dumpIndex, "index", false, "Dump raw index data") - fs.BoolVar(&opts.dumpBlocks, "blocks", false, "Dump raw block data") - fs.BoolVar(&dumpAll, "all", false, "Dump all data. 
Caution: This may print a lot of information") - fs.StringVar(&opts.filterKey, "filter-key", "", "Only display index and block data match this key substring") - - fs.Usage = func() { - println("Usage: influx_inspect dumptsm [options] \n\n Dumps low-level details about tsm1 files.") - println() - println("Options:") - fs.PrintDefaults() - os.Exit(0) - } - - if err := fs.Parse(flag.Args()[1:]); err != nil { - fmt.Printf("%v", err) - os.Exit(1) - } - - if len(fs.Args()) == 0 || fs.Args()[0] == "" { - fmt.Printf("TSM file not specified\n\n") - fs.Usage() - fs.PrintDefaults() - os.Exit(1) - } - opts.path = fs.Args()[0] - opts.dumpBlocks = opts.dumpBlocks || dumpAll || opts.filterKey != "" - opts.dumpIndex = opts.dumpIndex || dumpAll || opts.filterKey != "" - cmdDumpTsm1(opts) - default: - flag.Usage() - os.Exit(1) - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_inspect/tsm.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_inspect/tsm.go deleted file mode 100644 index 6b82b8e6f..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_inspect/tsm.go +++ /dev/null @@ -1,443 +0,0 @@ -package main - -import ( - "encoding/binary" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - "text/tabwriter" - "time" - - "github.com/golang/snappy" - "github.com/influxdb/influxdb/tsdb" - "github.com/influxdb/influxdb/tsdb/engine/tsm1" -) - -type tsdmDumpOpts struct { - dumpIndex bool - dumpBlocks bool - filterKey string - path string -} - -type tsmIndex struct { - series int - offset int64 - minTime time.Time - maxTime time.Time - blocks []*block -} - -type block struct { - id uint64 - offset int64 -} - -type blockStats struct { - min, max int - counts [][]int -} - -func (b *blockStats) inc(typ int, enc byte) { - for len(b.counts) <= typ { - b.counts = append(b.counts, []int{}) - } - for len(b.counts[typ]) <= int(enc) { - b.counts[typ] = append(b.counts[typ], 0) - } - b.counts[typ][enc]++ -} - -func (b *blockStats) size(sz int) { - if b.min == 0 || sz < b.min { - b.min = sz - } - if b.min == 0 || sz > b.max { - b.max = sz - } -} - -var ( - fieldType = []string{ - "timestamp", "float", "int", "bool", "string", - } - blockTypes = []string{ - "float64", "int64", "bool", "string", - } - timeEnc = []string{ - "none", "s8b", "rle", - } - floatEnc = []string{ - "none", "gor", - } - intEnc = []string{ - "none", "s8b", "rle", - } - boolEnc = []string{ - "none", "bp", - } - stringEnc = []string{ - "none", "snpy", - } - encDescs = [][]string{ - timeEnc, floatEnc, intEnc, boolEnc, stringEnc, - } -) - -func readFields(path string) (map[string]*tsdb.MeasurementFields, error) { - fields := make(map[string]*tsdb.MeasurementFields) - - f, err := os.OpenFile(filepath.Join(path, tsm1.FieldsFileExtension), os.O_RDONLY, 0666) - if os.IsNotExist(err) { - return fields, nil - } else if err != nil { - return nil, err - } - b, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - - data, err := snappy.Decode(nil, b) - if err != nil { - return nil, err - } - - if err := json.Unmarshal(data, &fields); err != nil { - return nil, err - } - return fields, nil -} - -func readSeries(path string) (map[string]*tsdb.Series, error) { - series := make(map[string]*tsdb.Series) - - f, err := os.OpenFile(filepath.Join(path, tsm1.SeriesFileExtension), os.O_RDONLY, 0666) - if os.IsNotExist(err) { - return series, nil - } else if err != nil { - return nil, err - } - defer f.Close() - b, err := ioutil.ReadAll(f) - if err != nil { - 
return nil, err - } - - data, err := snappy.Decode(nil, b) - if err != nil { - return nil, err - } - - if err := json.Unmarshal(data, &series); err != nil { - return nil, err - } - - return series, nil -} - -func readIds(path string) (map[string]uint64, error) { - f, err := os.OpenFile(filepath.Join(path, tsm1.IDsFileExtension), os.O_RDONLY, 0666) - if os.IsNotExist(err) { - return nil, nil - } else if err != nil { - return nil, err - } - b, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - - b, err = snappy.Decode(nil, b) - if err != nil { - return nil, err - } - - ids := make(map[string]uint64) - if b != nil { - if err := json.Unmarshal(b, &ids); err != nil { - return nil, err - } - } - return ids, err -} -func readIndex(f *os.File) (*tsmIndex, error) { - // Get the file size - stat, err := f.Stat() - if err != nil { - return nil, err - } - - // Seek to the series count - f.Seek(-4, os.SEEK_END) - b := make([]byte, 8) - _, err = f.Read(b[:4]) - if err != nil { - return nil, err - } - - seriesCount := binary.BigEndian.Uint32(b) - - // Get the min time - f.Seek(-20, os.SEEK_END) - f.Read(b) - minTime := time.Unix(0, int64(btou64(b))) - - // Get max time - f.Seek(-12, os.SEEK_END) - f.Read(b) - maxTime := time.Unix(0, int64(btou64(b))) - - // Figure out where the index starts - indexStart := stat.Size() - int64(seriesCount*12+20) - - // Seek to the start of the index - f.Seek(indexStart, os.SEEK_SET) - count := int(seriesCount) - index := &tsmIndex{ - offset: indexStart, - minTime: minTime, - maxTime: maxTime, - series: count, - } - - if indexStart < 0 { - return nil, fmt.Errorf("index corrupt: offset=%d", indexStart) - } - - // Read the index entries - for i := 0; i < count; i++ { - f.Read(b) - id := binary.BigEndian.Uint64(b) - f.Read(b[:4]) - pos := binary.BigEndian.Uint32(b[:4]) - index.blocks = append(index.blocks, &block{id: id, offset: int64(pos)}) - } - - return index, nil -} - -func cmdDumpTsm1(opts *tsdmDumpOpts) { - var errors []error - - f, err := os.Open(opts.path) - if err != nil { - println(err.Error()) - os.Exit(1) - } - - // Get the file size - stat, err := f.Stat() - if err != nil { - println(err.Error()) - os.Exit(1) - } - - b := make([]byte, 8) - f.Read(b[:4]) - - // Verify magic number - if binary.BigEndian.Uint32(b[:4]) != 0x16D116D1 { - println("Not a tsm1 file.") - os.Exit(1) - } - - ids, err := readIds(filepath.Dir(opts.path)) - if err != nil { - println("Failed to read series:", err.Error()) - os.Exit(1) - } - - invIds := map[uint64]string{} - for k, v := range ids { - invIds[v] = k - } - - index, err := readIndex(f) - if err != nil { - println("Failed to readIndex:", err.Error()) - - // Create a stubbed out index so we can still try and read the block data directly - // w/o panicing ourselves. 
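readIndex above recovers the footer by seeking from the end of the file: the series count sits in the last 4 bytes, the min/max times at offsets -20 and -12, and seriesCount 12-byte id/offset entries immediately precede the footer. A rough in-memory sketch of that layout, assuming the same offsets (decodeFooter is a hypothetical helper, not part of the tree); the stubbed fallback index continues below:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// decodeFooter mirrors the offsets readIndex seeks to: the last 20 bytes
// hold min time (8), max time (8) and the series count (4), preceded by
// count 12-byte entries of series ID (8) and block offset (4).
func decodeFooter(file []byte) (minT, maxT int64, ids []uint64, offsets []uint32) {
	n := len(file)
	count := int(binary.BigEndian.Uint32(file[n-4:]))
	minT = int64(binary.BigEndian.Uint64(file[n-20 : n-12]))
	maxT = int64(binary.BigEndian.Uint64(file[n-12 : n-4]))
	idx := n - (count*12 + 20)
	for i := 0; i < count; i++ {
		entry := file[idx+i*12:]
		ids = append(ids, binary.BigEndian.Uint64(entry[:8]))
		offsets = append(offsets, binary.BigEndian.Uint32(entry[8:12]))
	}
	return minT, maxT, ids, offsets
}

func main() {
	// Build a fake footer with one index entry (id=7, offset=4).
	buf := make([]byte, 12+20)
	binary.BigEndian.PutUint64(buf[0:8], 7)
	binary.BigEndian.PutUint32(buf[8:12], 4)
	binary.BigEndian.PutUint64(buf[12:20], 0)   // min time
	binary.BigEndian.PutUint64(buf[20:28], 100) // max time
	binary.BigEndian.PutUint32(buf[28:32], 1)   // series count
	minT, maxT, ids, offs := decodeFooter(buf)
	fmt.Println(minT, maxT, ids, offs) // 0 100 [7] [4]
}
```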
- index = &tsmIndex{ - minTime: time.Unix(0, 0), - maxTime: time.Unix(0, 0), - offset: stat.Size(), - } - } - - blockStats := &blockStats{} - - println("Summary:") - fmt.Printf(" File: %s\n", opts.path) - fmt.Printf(" Time Range: %s - %s\n", - index.minTime.UTC().Format(time.RFC3339Nano), - index.maxTime.UTC().Format(time.RFC3339Nano), - ) - fmt.Printf(" Duration: %s ", index.maxTime.Sub(index.minTime)) - fmt.Printf(" Series: %d ", index.series) - fmt.Printf(" File Size: %d\n", stat.Size()) - println() - - tw := tabwriter.NewWriter(os.Stdout, 8, 8, 1, '\t', 0) - fmt.Fprintln(tw, " "+strings.Join([]string{"Pos", "ID", "Ofs", "Key", "Field"}, "\t")) - for i, block := range index.blocks { - key := invIds[block.id] - split := strings.Split(key, "#!~#") - - // We don't know if we have fields so use an informative default - var measurement, field string = "UNKNOWN", "UNKNOWN" - - // We read some IDs from the ids file - if len(invIds) > 0 { - // Change the default to error until we know we have a valid key - measurement = "ERR" - field = "ERR" - - // Possible corruption? Try to read as much as we can and point to the problem. - if key == "" { - errors = append(errors, fmt.Errorf("index pos %d, field id: %d, missing key for id", i, block.id)) - } else if len(split) < 2 { - errors = append(errors, fmt.Errorf("index pos %d, field id: %d, key corrupt: got '%v'", i, block.id, key)) - } else { - measurement = split[0] - field = split[1] - } - } - - if opts.filterKey != "" && !strings.Contains(key, opts.filterKey) { - continue - } - fmt.Fprintln(tw, " "+strings.Join([]string{ - strconv.FormatInt(int64(i), 10), - strconv.FormatUint(block.id, 10), - strconv.FormatInt(int64(block.offset), 10), - measurement, - field, - }, "\t")) - } - - if opts.dumpIndex { - println("Index:") - tw.Flush() - println() - } - - tw = tabwriter.NewWriter(os.Stdout, 8, 8, 1, '\t', 0) - fmt.Fprintln(tw, " "+strings.Join([]string{"Blk", "Ofs", "Len", "ID", "Type", "Min Time", "Points", "Enc [T/V]", "Len [T/V]"}, "\t")) - - // Starting at 4 because the magic number is 4 bytes - i := int64(4) - var blockCount, pointCount, blockSize int64 - indexSize := stat.Size() - index.offset - - // Start at the beginning and read every block - for i < index.offset { - f.Seek(int64(i), 0) - - f.Read(b) - id := btou64(b) - f.Read(b[:4]) - length := binary.BigEndian.Uint32(b[:4]) - buf := make([]byte, length) - f.Read(buf) - - blockSize += int64(len(buf)) + 12 - - startTime := time.Unix(0, int64(btou64(buf[:8]))) - blockType := buf[8] - - encoded := buf[9:] - - var v []tsm1.Value - err := tsm1.DecodeBlock(buf, &v) - if err != nil { - fmt.Printf("error: %v\n", err.Error()) - os.Exit(1) - } - - pointCount += int64(len(v)) - - // Length of the timestamp block - tsLen, j := binary.Uvarint(encoded) - - // Unpack the timestamp bytes - ts := encoded[int(j) : int(j)+int(tsLen)] - - // Unpack the value bytes - values := encoded[int(j)+int(tsLen):] - - tsEncoding := timeEnc[int(ts[0]>>4)] - vEncoding := encDescs[int(blockType+1)][values[0]>>4] - - typeDesc := blockTypes[blockType] - - blockStats.inc(0, ts[0]>>4) - blockStats.inc(int(blockType+1), values[0]>>4) - blockStats.size(len(buf)) - - if opts.filterKey != "" && !strings.Contains(invIds[id], opts.filterKey) { - i += (12 + int64(length)) - blockCount++ - continue - } - - fmt.Fprintln(tw, " "+strings.Join([]string{ - strconv.FormatInt(blockCount, 10), - strconv.FormatInt(i, 10), - strconv.FormatInt(int64(len(buf)), 10), - strconv.FormatUint(id, 10), - typeDesc, -
startTime.UTC().Format(time.RFC3339Nano), - strconv.FormatInt(int64(len(v)), 10), - fmt.Sprintf("%s/%s", tsEncoding, vEncoding), - fmt.Sprintf("%d/%d", len(ts), len(values)), - }, "\t")) - - i += (12 + int64(length)) - blockCount++ - } - if opts.dumpBlocks { - println("Blocks:") - tw.Flush() - println() - } - - fmt.Printf("Statistics\n") - fmt.Printf(" Blocks:\n") - fmt.Printf(" Total: %d Size: %d Min: %d Max: %d Avg: %d\n", - blockCount, blockSize, blockStats.min, blockStats.max, blockSize/blockCount) - fmt.Printf(" Index:\n") - fmt.Printf(" Total: %d Size: %d\n", len(index.blocks), indexSize) - fmt.Printf(" Points:\n") - fmt.Printf(" Total: %d", pointCount) - println() - - println(" Encoding:") - for i, counts := range blockStats.counts { - if len(counts) == 0 { - continue - } - fmt.Printf(" %s: ", strings.Title(fieldType[i])) - for j, v := range counts { - fmt.Printf("\t%s: %d (%d%%) ", encDescs[i][j], v, int(float64(v)/float64(blockCount)*100)) - } - println() - } - fmt.Printf(" Compression:\n") - fmt.Printf(" Per block: %0.2f bytes/point\n", float64(blockSize)/float64(pointCount)) - fmt.Printf(" Total: %0.2f bytes/point\n", float64(stat.Size())/float64(pointCount)) - - if len(errors) > 0 { - println() - fmt.Printf("Errors (%d):\n", len(errors)) - for _, err := range errors { - fmt.Printf(" * %v\n", err) - } - println() - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/examples/100B_FLAT.toml b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/examples/100B_FLAT.toml deleted file mode 100644 index dfa60b083..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/examples/100B_FLAT.toml +++ /dev/null @@ -1,30 +0,0 @@ -channel_buffer_size = 100000 - -[write] - concurrency = 10 - batch_size = 10000 - batch_interval = "0s" - database = "stress" - precision = "n" - address = "localhost:8086" - reset_database = true - start_date = "2006-Jan-02" - -[[series]] - tick = "10s" - jitter = false - point_count = 1000000 # number of points that will be written for each of the series - measurement = "cpu" - series_count = 100000 - - [[series.tag]] - key = "host" - value = "server" - - [[series.tag]] - key = "location" - value = "loc" - - [[series.field]] - key = "value" - type = "float64-flat" diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/examples/100B_STD.toml b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/examples/100B_STD.toml deleted file mode 100644 index f2de3f0d8..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/examples/100B_STD.toml +++ /dev/null @@ -1,30 +0,0 @@ -channel_buffer_size = 100000 - -[write] - concurrency = 10 - batch_size = 10000 - batch_interval = "0s" - database = "stress" - precision = "n" - address = "localhost:8086" - reset_database = true - start_date = "2006-Jan-02" - -[[series]] - tick = "1ns" - jitter = true - point_count = 1000000 # number of points that will be written for each of the series - measurement = "cpu" - series_count = 100000 - - [[series.tag]] - key = "host" - value = "server" - - [[series.tag]] - key = "location" - value = "loc" - - [[series.field]] - key = "value" - type = "float64" diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/examples/100M_FLAT.toml b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/examples/100M_FLAT.toml deleted file mode 100644 index 7f2c3d03d..000000000 --- 
a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/examples/100M_FLAT.toml +++ /dev/null @@ -1,47 +0,0 @@ -channel_buffer_size = 100000 - -[write] - concurrency = 10 - batch_size = 10000 - batch_interval = "0s" - database = "stress" - precision = "s" - address = "localhost:8086" - reset_database = true - start_date = "2006-Jan-02" - -[[series]] - tick = "5s" - jitter = false - point_count = 10000 # number of points that will be written for each of the series - measurement = "cpu" - series_count = 10000 - - # tag_count = 20 # number of "generic" tags on a series (e.g. tag-key-1=tag-value, ... ,tag-key-20=tag-value) - - [[series.tag]] - key = "host" - value = "server" - - [[series.tag]] - key = "location" - value = "loc" - - [[series.field]] - key = "value" - type = "float64" - - # Doesn't work as expected -## [[series.field]] -## key = "value-2" -## type = "float64-inc" -## -## [[series.field]] -## key = "value-3" -## type = "float64-inc+" - - # Has 80% probability of being a constant value - [[series.field]] - key = "flat_value" - type = "float64-flat" - diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/examples/100M_STD.toml b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/examples/100M_STD.toml deleted file mode 100644 index 8d28f4f1e..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/examples/100M_STD.toml +++ /dev/null @@ -1,32 +0,0 @@ -channel_buffer_size = 100000 - -[write] - concurrency = 10 - batch_size = 10000 - batch_interval = "0s" - database = "stress" - precision = "s" - address = "localhost:8086" - reset_database = true - start_date = "2006-Jan-02" - -[[series]] - tick = "5s" - jitter = false - point_count = 10000 # number of points that will be written for each of the series - measurement = "cpu" - series_count = 10000 - - # tag_count = 20 # number of "generic" tags on a series (e.g. tag-key-1=tag-value, ...
,tag-key-20=tag-value) - - [[series.tag]] - key = "host" - value = "idk" - - [[series.tag]] - key = "location" - value = "lame" - - [[series.field]] - key = "value" - type = "float64" - diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/examples/long_form_date.toml b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/examples/long_form_date.toml deleted file mode 100644 index 38b860204..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/examples/long_form_date.toml +++ /dev/null @@ -1,34 +0,0 @@ -channel_buffer_size = 100000 - -[write] - concurrency = 10 - batch_size = 10000 - batch_interval = "0s" - database = "stress" - precision = "s" - address = "localhost:8086" - reset_database = true - start_date = "Jan 2, 2006 at 3:04pm (MST)" - -[[series]] - tick = "5s" - jitter = true - point_count = 10000 # number of points that will be written for each of the series - measurement = "cpu" - series_count = 10000 - - # tag_count = 20 # number of "generic" tags on a series (e.g. tag-key-1=tag-value, ... ,tag-key-20=tag-value) - - [[series.tag]] - key = "host" - value = "idk" - - [[series.tag]] - key = "location" - value = "lame" - - [[series.field]] - key = "value" - type = "float64" - - diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/examples/moderate_burn.toml b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/examples/moderate_burn.toml deleted file mode 100644 index ec193278a..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/examples/moderate_burn.toml +++ /dev/null @@ -1,30 +0,0 @@ -channel_buffer_size = 100 - -[write] - concurrency = 10 - batch_size = 10000 - batch_interval = "1s" - database = "stress" - precision = "n" - address = "localhost:8086" - reset_database = true - -[[series]] - tick = "5s" - jitter = true - point_count = 10000 # number of points that will be written for each of the series - measurement = "cpu" - series_count = 100000 - - [[series.tag]] - key = "host" - value = "server" - - [[series.tag]] - key = "location" - value = "loc" - - [[series.field]] - key = "value" - type = "float64" - diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/examples/template.toml b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/examples/template.toml deleted file mode 100644 index b188ef4d1..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/examples/template.toml +++ /dev/null @@ -1,114 +0,0 @@ -# Set the buffer size for the channel -# that sends points to the influxdb -# client -channel_buffer_size = 100000 - -# Configuration settings for the -# stress test -[write] - # How many concurrent writers to the db - concurrency = 10 - # Size of batches that are sent to db - batch_size = 5000 - # Interval between each batch - batch_interval = "0s" - # Database that is being written to - database = "stress" - # Precision of points that are being written - precision = "n" - # Address of the Influxdb instance - address = "localhost:8086" - # Drop and Create new DB - reset_database = true - # The date for the first point that is written into influx - start_date = "2006-Jan-02" - -# Describes the schema for series that will be -# written -[[series]] - # How much time between each timestamp - tick = "5s" - # Randomize timestamp a bit - jitter = false - # number of points that will be written for each of the series - point_count = 1000 - # name of the 
measurement that will be written - measurement = "cpu" - series_count = 10000 - - # number of "generic" tags on a series (e.g. tag-key-1=tag-value, ... ,tag-key-20=tag-value) - # tag_count = 20 - - # Defines a tag for a series - [[series.tag]] - key = "host" - value = "idk" - - [[series.tag]] - key = "location" - value = "lame" - - # Defines a field for a series - [[series.field]] - key = "value" - type = "float64" - - [[series.field]] - key = "percent" - type = "int" - - [[series.field]] - key = "idk" - type = "bool" - - [[series.field]] - key = "default" - -[[series]] - tick = "1ns" - point_count = 100 # number of points that will be written for each of the series - measurement = "mem" - series_count = 100000 - - [[series.tag]] - key = "host" - value = "idk" - - [[series.tag]] - key = "location" - value = "lame" - - [[series.field]] - key = "value" - type = "float64" - - [[series.field]] - key = "loc" - type = "float64" - - [[series.field]] - key = "sunny" - type = "bool" - - [[series.field]] - key = "idk" - type = "int" - -# Generates queries of the form -# SELECT aggregates(values) FROM measurements WHERE time > current_timespan - offset -[measurement_query] - enabled = true - concurrency = 10 - aggregates = ["mean", "count"] - fields = ["value"] - offset = "5h" - -# Generates queries of the form -# SELECT aggregates(values) FROM measurements WHERE tag-key='tag-values-1' -[series_query] - enabled = true - concurrency = 1 - aggregates = ["mean", "count"] - fields = ["value"] - # Interval between queries - interval = "50ms" diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/influx_stress.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/influx_stress.go deleted file mode 100644 index 608a33dc8..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/influx_stress.go +++ /dev/null @@ -1,128 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "runtime" - "sort" - "time" - - "github.com/influxdb/influxdb/stress" -) - -var ( - batchSize = flag.Int("batchsize", 0, "number of points per batch") - concurrency = flag.Int("concurrency", 0, "number of simultaneous writes to run") - batchInterval = flag.Duration("batchinterval", 0*time.Second, "duration between batches") - database = flag.String("database", "", "name of database") - address = flag.String("addr", "", "IP address and port of database (e.g., localhost:8086)") - precision = flag.String("precision", "", "The precision that points in the database will be with") - test = flag.String("test", "", "The stress test file") -) - -func main() { - var cfg *runner.Config - var err error - - runtime.GOMAXPROCS(runtime.NumCPU()) - flag.Parse() - - if *test == "" { - fmt.Println("'-test' flag is required") - return - } - - cfg, err = runner.DecodeFile(*test) - if err != nil { - fmt.Println(err) - return - } - - if *batchSize != 0 { - cfg.Write.BatchSize = *batchSize - } - - if *concurrency != 0 { - cfg.Write.Concurrency = *concurrency - } - - if *batchInterval != 0*time.Second { - cfg.Write.BatchInterval = batchInterval.String() - } - - if *database != "" { - cfg.Write.Database = *database - } - - if *address != "" { - cfg.Write.Address = *address - } - - if *precision != "" { - cfg.Write.Precision = *precision - } - - d := make(chan struct{}) - seriesQueryResults := make(chan runner.QueryResults) - - if cfg.SeriesQuery.Enabled { - go runner.SeriesQuery(cfg, d, seriesQueryResults) - } - - measurementQueryResults := make(chan runner.QueryResults) - - ts := make(chan 
time.Time) - if cfg.MeasurementQuery.Enabled { - go runner.MeasurementQuery(cfg, ts, measurementQueryResults) - } - - // Get the stress results - totalPoints, failedRequests, responseTimes, timer := runner.Run(cfg, d, ts) - - sort.Sort(sort.Reverse(sort.Interface(responseTimes))) - - total := int64(0) - for _, t := range responseTimes { - total += int64(t.Value) - } - mean := total / int64(len(responseTimes)) - - fmt.Printf("Wrote %d points at average rate of %.0f\n", totalPoints, float64(totalPoints)/timer.Elapsed().Seconds()) - fmt.Printf("%d requests failed for %d total points that didn't get posted.\n", failedRequests, failedRequests**batchSize) - fmt.Println("Average response time: ", time.Duration(mean)) - fmt.Println("Slowest response times:") - for _, r := range responseTimes[:100] { - fmt.Println(time.Duration(r.Value)) - } - - // Get series query results - if cfg.SeriesQuery.Enabled { - qrs := <-seriesQueryResults - - queryTotal := int64(0) - for _, qt := range qrs.ResponseTimes { - queryTotal += int64(qt.Value) - } - seriesQueryMean := queryTotal / int64(len(qrs.ResponseTimes)) - - fmt.Printf("Queried Series %d times with an average response time of %v milliseconds\n", qrs.TotalQueries, time.Duration(seriesQueryMean).Seconds()*1000) - - } - - // Get measurement query results - if cfg.MeasurementQuery.Enabled { - qrs := <-measurementQueryResults - - queryTotal := int64(0) - for _, qt := range qrs.ResponseTimes { - queryTotal += int64(qt.Value) - } - seriesQueryMean := queryTotal / int64(len(qrs.ResponseTimes)) - - fmt.Printf("Queried Measurement %d times with an average response time of %v milliseconds\n", qrs.TotalQueries, time.Duration(seriesQueryMean).Seconds()*1000) - - } - - return - -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup.go deleted file mode 100644 index c88652f75..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup.go +++ /dev/null @@ -1,170 +0,0 @@ -package backup - -import ( - "encoding/json" - "errors" - "flag" - "fmt" - "io" - "log" - "net" - "os" - - "github.com/influxdb/influxdb/services/snapshotter" - "github.com/influxdb/influxdb/snapshot" -) - -// Suffix is a suffix added to the backup while it's in-process. -const Suffix = ".pending" - -// Command represents the program execution for "influxd backup". -type Command struct { - // The logger passed to the ticker during execution. - Logger *log.Logger - - // Standard input/output, overridden for testing. - Stderr io.Writer -} - -// NewCommand returns a new instance of Command with default settings. -func NewCommand() *Command { - return &Command{ - Stderr: os.Stderr, - } -} - -// Run executes the program. -func (cmd *Command) Run(args ...string) error { - // Set up logger. - cmd.Logger = log.New(cmd.Stderr, "", log.LstdFlags) - cmd.Logger.Printf("influxdb backup") - - // Parse command line arguments. - host, path, err := cmd.parseFlags(args) - if err != nil { - return err - } - - // Retrieve snapshot from local file. - m, err := snapshot.ReadFileManifest(path) - if err != nil && !os.IsNotExist(err) { - return fmt.Errorf("read file snapshot: %s", err) - } - - // Determine temporary path to download to. - tmppath := path + Suffix - - // Calculate path of next backup file. - // This uses the path if it doesn't exist. - // Otherwise it appends an autoincrementing number.
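nextPath below implements the autoincrementing-suffix scheme the comment describes; as a standalone illustration of the pattern (nextFree is a hypothetical stand-in, probing with os.Stat only):

```go
package main

import (
	"fmt"
	"os"
)

// nextFree returns path if nothing exists there yet, otherwise the first
// free "path.N" variant. Hypothetical helper illustrating the pattern.
func nextFree(path string) (string, error) {
	if _, err := os.Stat(path); os.IsNotExist(err) {
		return path, nil
	} else if err != nil {
		return "", err
	}
	for i := 0; ; i++ {
		s := fmt.Sprintf("%s.%d", path, i)
		if _, err := os.Stat(s); os.IsNotExist(err) {
			return s, nil
		} else if err != nil {
			return "", err
		}
	}
}

func main() {
	p, _ := nextFree("backup.snapshot")
	fmt.Println(p) // "backup.snapshot", or "backup.snapshot.0", ...
}
```

Using the `"%s.%d"` verb rather than concatenating the path into the format string also keeps any `%` characters in the path itself from being misread as format verbs.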
- path, err = cmd.nextPath(path) - if err != nil { - return fmt.Errorf("next path: %s", err) - } - - // Retrieve snapshot. - if err := cmd.download(host, m, tmppath); err != nil { - return fmt.Errorf("download: %s", err) - } - - // Rename temporary file to final path. - if err := os.Rename(tmppath, path); err != nil { - return fmt.Errorf("rename: %s", err) - } - - // TODO: Check file integrity. - - // Notify user of completion. - cmd.Logger.Println("backup complete") - - return nil -} - -// parseFlags parses and validates the command line arguments. -func (cmd *Command) parseFlags(args []string) (host string, path string, err error) { - fs := flag.NewFlagSet("", flag.ContinueOnError) - fs.StringVar(&host, "host", "localhost:8088", "") - fs.SetOutput(cmd.Stderr) - fs.Usage = cmd.printUsage - if err := fs.Parse(args); err != nil { - return "", "", err - } - - // Ensure that only one arg is specified. - if fs.NArg() == 0 { - return "", "", errors.New("snapshot path required") - } else if fs.NArg() != 1 { - return "", "", errors.New("only one snapshot path allowed") - } - path = fs.Arg(0) - - return host, path, nil -} - -// nextPath returns the next file to write to. -func (cmd *Command) nextPath(path string) (string, error) { - // Use base path if it doesn't exist. - if _, err := os.Stat(path); os.IsNotExist(err) { - return path, nil - } else if err != nil { - return "", err - } - - // Otherwise iterate through incremental files until one is available. - for i := 0; ; i++ { - s := fmt.Sprintf(path+".%d", i) - if _, err := os.Stat(s); os.IsNotExist(err) { - return s, nil - } else if err != nil { - return "", err - } - } -} - -// download downloads a snapshot from a host to a given path. -func (cmd *Command) download(host string, m *snapshot.Manifest, path string) error { - // Create local file to write to. - f, err := os.Create(path) - if err != nil { - return fmt.Errorf("open temp file: %s", err) - } - defer f.Close() - - // Connect to snapshotter service. - conn, err := net.Dial("tcp", host) - if err != nil { - return err - } - defer conn.Close() - - // Send snapshotter marker byte. - if _, err := conn.Write([]byte{snapshotter.MuxHeader}); err != nil { - return fmt.Errorf("write snapshot header byte: %s", err) - } - - // Write the manifest we currently have. - if err := json.NewEncoder(conn).Encode(m); err != nil { - return fmt.Errorf("encode snapshot manifest: %s", err) - } - - // Read snapshot from the connection. - if _, err := io.Copy(f, conn); err != nil { - return fmt.Errorf("copy snapshot to file: %s", err) - } - - // FIXME(benbjohnson): Verify integrity of snapshot. - - return nil -} - -// printUsage prints the usage message to STDERR. -func (cmd *Command) printUsage() { - fmt.Fprintf(cmd.Stderr, `usage: influxd backup [flags] PATH - -backup downloads a snapshot of a data node and saves it to disk. - - -host - The host to connect to snapshot. - Defaults to 127.0.0.1:8088. 
-`) -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup_test.go deleted file mode 100644 index 15db96449..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package backup_test - -/* -import ( - "bytes" - "net/http" - "net/http/httptest" - "os" - "strings" - "testing" - - "github.com/influxdb/influxdb" - "github.com/influxdb/influxdb/cmd/influxd" -) - -// Ensure the backup can download from the server and save to disk. -func TestBackupCommand(t *testing.T) { - // Mock the backup endpoint. - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/data/snapshot" { - t.Fatalf("unexpected url path: %s", r.URL.Path) - } - - // Write a simple snapshot to the buffer. - sw := influxdb.NewSnapshotWriter() - sw.Snapshot = &influxdb.Snapshot{Files: []influxdb.SnapshotFile{ - {Name: "meta", Size: 5, Index: 10}, - }} - sw.FileWriters["meta"] = influxdb.NopWriteToCloser(bytes.NewBufferString("55555")) - if _, err := sw.WriteTo(w); err != nil { - t.Fatal(err) - } - })) - defer s.Close() - - // Create a temp path and remove incremental backups at the end. - path := tempfile() - defer os.Remove(path) - defer os.Remove(path + ".0") - defer os.Remove(path + ".1") - - // Execute the backup against the mock server. - for i := 0; i < 3; i++ { - if err := NewBackupCommand().Run("-host", s.URL, path); err != nil { - t.Fatal(err) - } - } - - // Verify snapshot and two incremental snapshots were written. - if _, err := os.Stat(path); err != nil { - t.Fatalf("snapshot not found: %s", err) - } else if _, err = os.Stat(path + ".0"); err != nil { - t.Fatalf("incremental snapshot(0) not found: %s", err) - } else if _, err = os.Stat(path + ".1"); err != nil { - t.Fatalf("incremental snapshot(1) not found: %s", err) - } -} - -// Ensure the backup command returns an error if flags cannot be parsed. -func TestBackupCommand_ErrFlagParse(t *testing.T) { - cmd := NewBackupCommand() - if err := cmd.Run("-bad-flag"); err == nil || err.Error() != `flag provided but not defined: -bad-flag` { - t.Fatal(err) - } else if !strings.Contains(cmd.Stderr.String(), "usage") { - t.Fatal("usage message not displayed") - } -} - -// Ensure the backup command returns an error if the host cannot be parsed. -func TestBackupCommand_ErrInvalidHostURL(t *testing.T) { - if err := NewBackupCommand().Run("-host", "http://%f"); err == nil || err.Error() != `parse host url: parse http://%f: hexadecimal escape in host` { - t.Fatal(err) - } -} - -// Ensure the backup command returns an error if the output path is not specified. -func TestBackupCommand_ErrPathRequired(t *testing.T) { - if err := NewBackupCommand().Run("-host", "//localhost"); err == nil || err.Error() != `snapshot path required` { - t.Fatal(err) - } -} - -// Ensure the backup returns an error if it cannot connect to the server. -func TestBackupCommand_ErrConnectionRefused(t *testing.T) { - // Start and immediately stop a server so we have a dead port. - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) - s.Close() - - // Execute the backup command. 
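The (currently commented-out) tests above build on net/http/httptest, which serves a handler from an ephemeral local address. A minimal sketch of that mock-server pattern, standard library only:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Serve a canned response from an ephemeral local server.
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "snapshot-bytes")
	}))
	defer s.Close()

	// Exercise it exactly as the tests above exercise the backup endpoint.
	resp, err := http.Get(s.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body)) // snapshot-bytes
}
```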
- path := tempfile() - defer os.Remove(path) - if err := NewBackupCommand().Run("-host", s.URL, path); err == nil || - !(strings.Contains(err.Error(), `connection refused`) || strings.Contains(err.Error(), `No connection could be made`)) { - t.Fatal(err) - } -} - -// Ensure the backup returns any non-200 status codes. -func TestBackupCommand_ErrServerError(t *testing.T) { - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusInternalServerError) - })) - defer s.Close() - - // Execute the backup command. - path := tempfile() - defer os.Remove(path) - if err := NewBackupCommand().Run("-host", s.URL, path); err == nil || err.Error() != `download: snapshot error: status=500` { - t.Fatal(err) - } -} - -// BackupCommand is a test wrapper for main.BackupCommand. -type BackupCommand struct { - *main.BackupCommand - Stderr bytes.Buffer -} - -// NewBackupCommand returns a new instance of BackupCommand. -func NewBackupCommand() *BackupCommand { - cmd := &BackupCommand{BackupCommand: main.NewBackupCommand()} - cmd.BackupCommand.Stderr = &cmd.Stderr - return cmd -} -*/ diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/help/help.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/help/help.go deleted file mode 100644 index 3f6bbfb08..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/help/help.go +++ /dev/null @@ -1,46 +0,0 @@ -package help - -import ( - "fmt" - "io" - "os" - "strings" -) - -// Command displays help for command-line sub-commands. -type Command struct { - Stdout io.Writer -} - -// NewCommand returns a new instance of Command. -func NewCommand() *Command { - return &Command{ - Stdout: os.Stdout, - } -} - -// Run executes the command. -func (cmd *Command) Run(args ...string) error { - fmt.Fprintln(cmd.Stdout, strings.TrimSpace(usage)) - return nil -} - -const usage = ` -Configure and start an InfluxDB server. - -Usage: - - influxd [[command] [arguments]] - -The commands are: - - backup downloads a snapshot of a data node and saves it to disk - config display the default configuration - restore uses a snapshot of a data node to rebuild a cluster - run run node with existing configuration - version displays the InfluxDB version - -"run" is the default command. - -Use "influxd help [command]" for more information about a command. -` diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/main.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/main.go deleted file mode 100644 index b528e39d3..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/main.go +++ /dev/null @@ -1,205 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "io" - "log" - "math/rand" - "os" - "os/signal" - "strings" - "syscall" - "time" - - "github.com/influxdb/influxdb/cmd/influxd/backup" - "github.com/influxdb/influxdb/cmd/influxd/help" - "github.com/influxdb/influxdb/cmd/influxd/restore" - "github.com/influxdb/influxdb/cmd/influxd/run" -) - -// These variables are populated via the Go linker. -var ( - version = "0.9" - commit string - branch string - buildTime string -) - -func init() { - // If commit, branch, or build time are not set, make that clear. 
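These variables are only non-empty when injected at link time, which is what the init below compensates for. A hedged standalone sketch of the -ldflags -X mechanism (file and variable names illustrative):

```go
package main

import "fmt"

// Populated via the linker, mirroring the pattern above, e.g.:
//
//   go build -ldflags "-X main.commit=$(git rev-parse HEAD)" example.go
var commit string

func main() {
	if commit == "" {
		commit = "unknown" // same defaulting as the init() above
	}
	fmt.Println("commit:", commit)
}
```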
- if commit == "" { - commit = "unknown" - } - if branch == "" { - branch = "unknown" - } - if buildTime == "" { - buildTime = "unknown" - } -} - -func main() { - rand.Seed(time.Now().UnixNano()) - - m := NewMain() - if err := m.Run(os.Args[1:]...); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -// Main represents the program execution. -type Main struct { - Logger *log.Logger - - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewMain return a new instance of Main. -func NewMain() *Main { - return &Main{ - Logger: log.New(os.Stderr, "[run] ", log.LstdFlags), - Stdin: os.Stdin, - Stdout: os.Stdout, - Stderr: os.Stderr, - } -} - -// Run determines and runs the command specified by the CLI args. -func (m *Main) Run(args ...string) error { - name, args := ParseCommandName(args) - - // Extract name from args. - switch name { - case "", "run": - cmd := run.NewCommand() - - // Tell the server the build details. - cmd.Version = version - cmd.Commit = commit - cmd.Branch = branch - cmd.BuildTime = buildTime - - if err := cmd.Run(args...); err != nil { - return fmt.Errorf("run: %s", err) - } - - signalCh := make(chan os.Signal, 1) - signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM) - m.Logger.Println("Listening for signals") - - // Block until one of the signals above is received - select { - case <-signalCh: - m.Logger.Println("Signal received, initializing clean shutdown...") - go func() { - cmd.Close() - }() - } - - // Block again until another signal is received, a shutdown timeout elapses, - // or the Command is gracefully closed - m.Logger.Println("Waiting for clean shutdown...") - select { - case <-signalCh: - m.Logger.Println("second signal received, initializing hard shutdown") - case <-time.After(time.Second * 30): - m.Logger.Println("time limit reached, initializing hard shutdown") - case <-cmd.Closed: - m.Logger.Println("server shutdown completed") - } - - // goodbye. - - case "backup": - name := backup.NewCommand() - if err := name.Run(args...); err != nil { - return fmt.Errorf("backup: %s", err) - } - case "restore": - name := restore.NewCommand() - if err := name.Run(args...); err != nil { - return fmt.Errorf("restore: %s", err) - } - case "config": - if err := run.NewPrintConfigCommand().Run(args...); err != nil { - return fmt.Errorf("config: %s", err) - } - case "version": - if err := NewVersionCommand().Run(args...); err != nil { - return fmt.Errorf("version: %s", err) - } - case "help": - if err := help.NewCommand().Run(args...); err != nil { - return fmt.Errorf("help: %s", err) - } - default: - return fmt.Errorf(`unknown command "%s"`+"\n"+`Run 'influxd help' for usage`+"\n\n", name) - } - - return nil -} - -// ParseCommandName extracts the command name and args from the args list. -func ParseCommandName(args []string) (string, []string) { - // Retrieve command name as first argument. - var name string - if len(args) > 0 && !strings.HasPrefix(args[0], "-") { - name = args[0] - } - - // Special case -h immediately following binary name - if len(args) > 0 && args[0] == "-h" { - name = "help" - } - - // If command is "help" and has an argument then rewrite args to use "-h". - if name == "help" && len(args) > 1 { - args[0], args[1] = args[1], "-h" - name = args[0] - } - - // If a named command is specified then return it with its arguments. - if name != "" { - return name, args[1:] - } - return "", args -} - -// VersionCommand represents the command executed by "influxd version". 
-type VersionCommand struct { - Stdout io.Writer - Stderr io.Writer -} - -// NewVersionCommand return a new instance of VersionCommand. -func NewVersionCommand() *VersionCommand { - return &VersionCommand{ - Stdout: os.Stdout, - Stderr: os.Stderr, - } -} - -// Run prints the current version and commit info. -func (cmd *VersionCommand) Run(args ...string) error { - // Parse flags in case -h is specified. - fs := flag.NewFlagSet("", flag.ContinueOnError) - fs.Usage = func() { fmt.Fprintln(cmd.Stderr, strings.TrimSpace(versionUsage)) } - if err := fs.Parse(args); err != nil { - return err - } - - // Print version info. - fmt.Fprintf(cmd.Stdout, "InfluxDB v%s (git: %s %s, built %s)\n", version, branch, commit, buildTime) - - return nil -} - -var versionUsage = ` -usage: version - - version displays the InfluxDB version, build branch and git commit hash -` diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore.go deleted file mode 100644 index 4d22916fa..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore.go +++ /dev/null @@ -1,260 +0,0 @@ -package restore - -import ( - "bytes" - "errors" - "flag" - "fmt" - "io" - "net" - "os" - "path/filepath" - - "github.com/BurntSushi/toml" - "github.com/influxdb/influxdb/meta" - "github.com/influxdb/influxdb/snapshot" - "github.com/influxdb/influxdb/tsdb" -) - -// Command represents the program execution for "influxd restore". -type Command struct { - Stdout io.Writer - Stderr io.Writer -} - -// NewCommand returns a new instance of Command with default settings. -func NewCommand() *Command { - return &Command{ - Stdout: os.Stdout, - Stderr: os.Stderr, - } -} - -// Run executes the program. -func (cmd *Command) Run(args ...string) error { - config, path, err := cmd.parseFlags(args) - if err != nil { - return err - } - - return cmd.Restore(config, path) -} - -// Restore restores a database snapshot -func (cmd *Command) Restore(config *Config, path string) error { - // Remove meta and data directories. - if err := os.RemoveAll(config.Meta.Dir); err != nil { - return fmt.Errorf("remove meta dir: %s", err) - } else if err := os.RemoveAll(config.Data.Dir); err != nil { - return fmt.Errorf("remove data dir: %s", err) - } - - // Open snapshot file and all incremental backups. - mr, files, err := snapshot.OpenFileMultiReader(path) - if err != nil { - return fmt.Errorf("open multireader: %s", err) - } - defer closeAll(files) - - // Unpack files from archive. - if err := cmd.unpack(mr, config); err != nil { - return fmt.Errorf("unpack: %s", err) - } - - // Notify user of completion. - fmt.Fprintf(os.Stdout, "restore complete using %s", path) - return nil -} - -// parseFlags parses and validates the command line arguments. -func (cmd *Command) parseFlags(args []string) (*Config, string, error) { - fs := flag.NewFlagSet("", flag.ContinueOnError) - configPath := fs.String("config", "", "") - fs.SetOutput(cmd.Stderr) - fs.Usage = cmd.printUsage - if err := fs.Parse(args); err != nil { - return nil, "", err - } - - // Parse configuration file from disk. - if *configPath == "" { - return nil, "", fmt.Errorf("config required") - } - - // Parse config. - config := Config{ - Meta: meta.NewConfig(), - Data: tsdb.NewConfig(), - } - if _, err := toml.DecodeFile(*configPath, &config); err != nil { - return nil, "", err - } - - // Require output path. 
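parseFlags above decodes the full server config file into a struct that declares only the [meta] and [data] sections; BurntSushi/toml silently ignores keys the target struct does not mention, which is what lets restore reuse the whole file. A small sketch of that partial-decode behavior (section and field names illustrative):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type dataConfig struct {
	Dir string `toml:"dir"`
}

type config struct {
	Data dataConfig `toml:"data"`
}

func main() {
	// Sections the struct doesn't declare (here: [meta]) are ignored.
	doc := `
[meta]
dir = "/var/opt/influxdb/meta"

[data]
dir = "/var/opt/influxdb/data"
`
	var c config
	if _, err := toml.Decode(doc, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Data.Dir) // /var/opt/influxdb/data
}
```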
- path := fs.Arg(0) - if path == "" { - return nil, "", fmt.Errorf("snapshot path required") - } - - return &config, path, nil -} - -func closeAll(a []io.Closer) { - for _, c := range a { - _ = c.Close() - } -} - -// unpack expands the files in the snapshot archive into a directory. -func (cmd *Command) unpack(mr *snapshot.MultiReader, config *Config) error { - // Loop over files and extract. - for { - // Read entry header. - sf, err := mr.Next() - if err == io.EOF { - break - } else if err != nil { - return fmt.Errorf("next: entry=%s, err=%s", sf.Name, err) - } - - // Log progress. - fmt.Fprintf(os.Stdout, "unpacking: %s (%d bytes)\n", sf.Name, sf.Size) - - // Handle meta and tsdb files separately. - switch sf.Name { - case "meta": - if err := cmd.unpackMeta(mr, sf, config); err != nil { - return fmt.Errorf("meta: %s", err) - } - default: - if err := cmd.unpackData(mr, sf, config); err != nil { - return fmt.Errorf("data: %s", err) - } - } - } - - return nil -} - -// unpackMeta reads the metadata from the snapshot and initializes a raft -// cluster and replaces the root metadata. -func (cmd *Command) unpackMeta(mr *snapshot.MultiReader, sf snapshot.File, config *Config) error { - // Read meta into buffer. - var buf bytes.Buffer - if _, err := io.CopyN(&buf, mr, sf.Size); err != nil { - return fmt.Errorf("copy: %s", err) - } - - // Unpack into metadata. - var data meta.Data - if err := data.UnmarshalBinary(buf.Bytes()); err != nil { - return fmt.Errorf("unmarshal: %s", err) - } - - // Copy meta config and remove peers so it starts in single mode. - c := config.Meta - c.Peers = nil - - // Initialize meta store. - store := meta.NewStore(config.Meta) - store.RaftListener = newNopListener() - store.ExecListener = newNopListener() - store.RPCListener = newNopListener() - - // Determine advertised address. - _, port, err := net.SplitHostPort(config.Meta.BindAddress) - if err != nil { - return fmt.Errorf("split bind address: %s", err) - } - hostport := net.JoinHostPort(config.Meta.Hostname, port) - - // Resolve address. - addr, err := net.ResolveTCPAddr("tcp", hostport) - if err != nil { - return fmt.Errorf("resolve tcp: addr=%s, err=%s", hostport, err) - } - store.Addr = addr - store.RemoteAddr = addr - - // Open the meta store. - if err := store.Open(); err != nil { - return fmt.Errorf("open store: %s", err) - } - defer store.Close() - - // Wait for the store to be ready or error. - select { - case <-store.Ready(): - case err := <-store.Err(): - return err - } - - // Force set the full metadata. - if err := store.SetData(&data); err != nil { - return fmt.Errorf("set data: %s", err) - } - - return nil -} - -func (cmd *Command) unpackData(mr *snapshot.MultiReader, sf snapshot.File, config *Config) error { - path := filepath.Join(config.Data.Dir, sf.Name) - // Create parent directory for output file. - if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil { - return fmt.Errorf("mkdir: entry=%s, err=%s", sf.Name, err) - } - - // Create output file. - f, err := os.Create(path) - if err != nil { - return fmt.Errorf("create: entry=%s, err=%s", sf.Name, err) - } - defer f.Close() - - // Copy contents from reader. - if _, err := io.CopyN(f, mr, sf.Size); err != nil { - return fmt.Errorf("copy: entry=%s, err=%s", sf.Name, err) - } - - return nil -} - -// printUsage prints the usage message to STDERR. -func (cmd *Command) printUsage() { - fmt.Fprintf(cmd.Stderr, `usage: influxd restore [flags] PATH - -restore uses a snapshot of a data node to rebuild a cluster. 
- - -config - Set the path to the configuration file. -`) -} - -// Config represents a partial config for rebuilding the server. -type Config struct { - Meta *meta.Config `toml:"meta"` - Data tsdb.Config `toml:"data"` -} - -type nopListener struct { - closing chan struct{} -} - -func newNopListener() *nopListener { - return &nopListener{make(chan struct{})} -} - -func (ln *nopListener) Accept() (net.Conn, error) { - <-ln.closing - return nil, errors.New("listener closing") -} - -func (ln *nopListener) Close() error { - if ln.closing != nil { - close(ln.closing) - ln.closing = nil - } - return nil -} - -func (ln *nopListener) Addr() net.Addr { return nil } diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore_test.go deleted file mode 100644 index f9d974212..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package restore_test - -/* -import ( - "bytes" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "testing" - "time" - - main "github.com/influxdb/influxdb/cmd/influxd" - "github.com/influxdb/influxdb/tsdb" -) - -func newConfig(path string, port int) main.Config { - config := main.NewConfig() - config.Port = port - config.Broker.Enabled = true - config.Broker.Dir = filepath.Join(path, "broker") - - config.Data.Enabled = true - config.Data.Dir = filepath.Join(path, "data") - return *config -} - -// Ensure the restore command can expand a snapshot and bootstrap a broker. -func TestRestoreCommand(t *testing.T) { - if testing.Short() { - t.Skip("skipping TestRestoreCommand") - } - - now := time.Now() - - // Create root path to server. - path := tempfile() - defer os.Remove(path) - - // Parse configuration. - config := newConfig(path, 8900) - - // Start server. - cmd := main.NewRunCommand() - node := cmd.Open(&config, "") - if node.Broker == nil { - t.Fatal("cannot run broker") - } else if node.DataNode == nil { - t.Fatal("cannot run server") - } - b := node.Broker - s := node.DataNode - - // Create data. - if err := s.CreateDatabase("db"); err != nil { - t.Fatalf("cannot create database: %s", err) - } - if index, err := s.WriteSeries("db", "default", []models.Point{tsdb.NewPoint("cpu", nil, map[string]interface{}{"value": float64(100)}, now)}); err != nil { - t.Fatalf("cannot write series: %s", err) - } else if err = s.Sync(1, index); err != nil { - t.Fatalf("shard sync: %s", err) - } - - // Create snapshot writer. - sw, err := s.CreateSnapshotWriter() - if err != nil { - t.Fatalf("create snapshot writer: %s", err) - } - - // Snapshot to file. - sspath := tempfile() - f, err := os.Create(sspath) - if err != nil { - t.Fatal(err) - } - sw.WriteTo(f) - f.Close() - - // Stop server. - node.Close() - - // Remove data & broker directories. - if err := os.RemoveAll(path); err != nil { - t.Fatalf("remove: %s", err) - } - - // Execute the restore. - if err := NewRestoreCommand().Restore(&config, sspath); err != nil { - t.Fatal(err) - } - - // Rewrite config to a new port and re-parse. - config = newConfig(path, 8910) - - // Restart server. - cmd = main.NewRunCommand() - node = cmd.Open(&config, "") - if b == nil { - t.Fatal("cannot run broker") - } else if s == nil { - t.Fatal("cannot run server") - } - b = node.Broker - s = node.DataNode - - // Write new data. 
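newNopListener above satisfies net.Listener without ever accepting a connection: Accept blocks until Close, then fails. That is what lets the meta store open during a restore without real networking. A self-contained sketch of that behavior (compacted copy of the type, omitting the nil-guard in Close):

```go
package main

import (
	"errors"
	"fmt"
	"net"
)

// Compact copy of the nopListener above, for a standalone demo.
type nopListener struct{ closing chan struct{} }

func newNopListener() *nopListener { return &nopListener{make(chan struct{})} }

func (ln *nopListener) Accept() (net.Conn, error) {
	<-ln.closing // block until the listener is closed
	return nil, errors.New("listener closing")
}
func (ln *nopListener) Close() error   { close(ln.closing); return nil }
func (ln *nopListener) Addr() net.Addr { return nil }

func main() {
	ln := newNopListener()
	done := make(chan error)
	go func() {
		_, err := ln.Accept() // blocks until Close below
		done <- err
	}()
	ln.Close()
	fmt.Println(<-done) // listener closing
}
```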
- if err := s.CreateDatabase("newdb"); err != nil { - t.Fatalf("cannot create new database: %s", err) - } - if index, err := s.WriteSeries("newdb", "default", []models.Point{tsdb.NewPoint("mem", nil, map[string]interface{}{"value": float64(1000)}, now)}); err != nil { - t.Fatalf("cannot write new series: %s", err) - } else if err = s.Sync(2, index); err != nil { - t.Fatalf("shard sync: %s", err) - } - - // Read series data. - if v, err := s.ReadSeries("db", "default", "cpu", nil, now); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(v, map[string]interface{}{"value": float64(100)}) { - t.Fatalf("read series(0) mismatch: %#v", v) - } - - // Read new series data. - if v, err := s.ReadSeries("newdb", "default", "mem", nil, now); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(v, map[string]interface{}{"value": float64(1000)}) { - t.Fatalf("read series(1) mismatch: %#v", v) - } - - // Stop server. - node.Close() -} - -// RestoreCommand is a test wrapper for main.RestoreCommand. -type RestoreCommand struct { - *main.RestoreCommand - Stderr bytes.Buffer -} - -// NewRestoreCommand returns a new instance of RestoreCommand. -func NewRestoreCommand() *RestoreCommand { - cmd := &RestoreCommand{RestoreCommand: main.NewRestoreCommand()} - cmd.RestoreCommand.Stderr = &cmd.Stderr - return cmd -} - -// MustReadFile reads data from a file. Panic on error. -func MustReadFile(filename string) []byte { - b, err := ioutil.ReadFile(filename) - if err != nil { - panic(err.Error()) - } - return b -} -*/ diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/command.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/command.go deleted file mode 100644 index e7974a3f4..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/command.go +++ /dev/null @@ -1,243 +0,0 @@ -package run - -import ( - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "time" - - "github.com/BurntSushi/toml" -) - -const logo = ` - 8888888 .d888 888 8888888b. 888888b. - 888 d88P" 888 888 "Y88b 888 "88b - 888 888 888 888 888 888 .88P - 888 88888b. 888888 888 888 888 888 888 888 888 8888888K. - 888 888 "88b 888 888 888 888 Y8bd8P' 888 888 888 "Y88b - 888 888 888 888 888 888 888 X88K 888 888 888 888 - 888 888 888 888 888 Y88b 888 .d8""8b. 888 .d88P 888 d88P - 8888888 888 888 888 888 "Y88888 888 888 8888888P" 8888888P" - -` - -// Command represents the command executed by "influxd run". -type Command struct { - Version string - Branch string - Commit string - BuildTime string - - closing chan struct{} - Closed chan struct{} - - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer - - Server *Server -} - -// NewCommand return a new instance of Command. -func NewCommand() *Command { - return &Command{ - closing: make(chan struct{}), - Closed: make(chan struct{}), - Stdin: os.Stdin, - Stdout: os.Stdout, - Stderr: os.Stderr, - } -} - -// Run parses the config from args and runs the server. -func (cmd *Command) Run(args ...string) error { - // Parse the command line flags. - options, err := cmd.ParseFlags(args...) - if err != nil { - return err - } - - // Print sweet InfluxDB logo. - fmt.Print(logo) - - // Mark start-up in log. - log.Printf("InfluxDB starting, version %s, branch %s, commit %s, built %s", - cmd.Version, cmd.Branch, cmd.Commit, cmd.BuildTime) - log.Printf("Go version %s, GOMAXPROCS set to %d", runtime.Version(), runtime.GOMAXPROCS(0)) - - // Set parallelism. 
- runtime.GOMAXPROCS(runtime.NumCPU()) - - // Write the PID file. - if err := cmd.writePIDFile(options.PIDFile); err != nil { - return fmt.Errorf("write pid file: %s", err) - } - - // Turn on block profiling to debug stuck databases - runtime.SetBlockProfileRate(int(1 * time.Second)) - - // Parse config - config, err := cmd.ParseConfig(options.ConfigPath) - if err != nil { - return fmt.Errorf("parse config: %s", err) - } - - // Apply any environment variables on top of the parsed config - if err := config.ApplyEnvOverrides(); err != nil { - return fmt.Errorf("apply env config: %v", err) - } - - // Override config hostname if specified in the command line args. - if options.Hostname != "" { - config.Meta.Hostname = options.Hostname - } - - if options.Join != "" { - config.Meta.Peers = strings.Split(options.Join, ",") - } - - // Validate the configuration. - if err := config.Validate(); err != nil { - return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`", err) - } - - // Create server from config and start it. - buildInfo := &BuildInfo{ - Version: cmd.Version, - Commit: cmd.Commit, - Branch: cmd.Branch, - Time: cmd.BuildTime, - } - s, err := NewServer(config, buildInfo) - if err != nil { - return fmt.Errorf("create server: %s", err) - } - s.CPUProfile = options.CPUProfile - s.MemProfile = options.MemProfile - if err := s.Open(); err != nil { - return fmt.Errorf("open server: %s", err) - } - cmd.Server = s - - // Begin monitoring the server's error channel. - go cmd.monitorServerErrors() - - return nil -} - -// Close shuts down the server. -func (cmd *Command) Close() error { - defer close(cmd.Closed) - close(cmd.closing) - if cmd.Server != nil { - return cmd.Server.Close() - } - return nil -} - -func (cmd *Command) monitorServerErrors() { - logger := log.New(cmd.Stderr, "", log.LstdFlags) - for { - select { - case err := <-cmd.Server.Err(): - logger.Println(err) - case <-cmd.closing: - return - } - } -} - -// ParseFlags parses the command line flags from args and returns an options set. -func (cmd *Command) ParseFlags(args ...string) (Options, error) { - var options Options - fs := flag.NewFlagSet("", flag.ContinueOnError) - fs.StringVar(&options.ConfigPath, "config", "", "") - fs.StringVar(&options.PIDFile, "pidfile", "", "") - fs.StringVar(&options.Hostname, "hostname", "", "") - fs.StringVar(&options.Join, "join", "", "") - fs.StringVar(&options.CPUProfile, "cpuprofile", "", "") - fs.StringVar(&options.MemProfile, "memprofile", "", "") - fs.Usage = func() { fmt.Fprintln(cmd.Stderr, usage) } - if err := fs.Parse(args); err != nil { - return Options{}, err - } - return options, nil -} - -// writePIDFile writes the process ID to path. -func (cmd *Command) writePIDFile(path string) error { - // Ignore if path is not set. - if path == "" { - return nil - } - - // Ensure the required directory structure exists. - err := os.MkdirAll(filepath.Dir(path), 0777) - if err != nil { - return fmt.Errorf("mkdir: %s", err) - } - - // Retrieve the PID and write it. - pid := strconv.Itoa(os.Getpid()) - if err := ioutil.WriteFile(path, []byte(pid), 0666); err != nil { - return fmt.Errorf("write file: %s", err) - } - - return nil -} - -// ParseConfig parses the config at path. -// Returns a demo configuration if path is blank. -func (cmd *Command) ParseConfig(path string) (*Config, error) { - // Use demo configuration if no config path is specified. 
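// With no -config flag the server falls back to NewDemoConfig, which keeps
// all state under the invoking user's home directory:
//
//	~/.influxdb/meta, ~/.influxdb/data, ~/.influxdb/wal, ~/.influxdb/hh
//
// so a bare `influxd run` works out of the box.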
- if path == "" { - log.Println("no configuration provided, using default settings") - return NewDemoConfig() - } - - log.Printf("Using configuration at: %s\n", path) - - config := NewConfig() - if _, err := toml.DecodeFile(path, &config); err != nil { - return nil, err - } - - return config, nil -} - -var usage = `usage: run [flags] - -run starts the broker and data node server. If this is the first time running -the command then a new cluster will be initialized unless the -join argument -is used. - - -config - Set the path to the configuration file. - - -hostname - Override the hostname, the 'hostname' configuration - option will be overridden. - - -join - Joins the server to an existing cluster. - - -pidfile - Write process ID to a file. -` - -// Options represents the command line options that can be parsed. -type Options struct { - ConfigPath string - PIDFile string - Hostname string - Join string - CPUProfile string - MemProfile string -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config.go deleted file mode 100644 index 92474baaa..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config.go +++ /dev/null @@ -1,233 +0,0 @@ -package run - -import ( - "errors" - "fmt" - "os" - "os/user" - "path/filepath" - "reflect" - "strconv" - "strings" - "time" - - "github.com/influxdb/influxdb/cluster" - "github.com/influxdb/influxdb/meta" - "github.com/influxdb/influxdb/monitor" - "github.com/influxdb/influxdb/services/admin" - "github.com/influxdb/influxdb/services/collectd" - "github.com/influxdb/influxdb/services/continuous_querier" - "github.com/influxdb/influxdb/services/graphite" - "github.com/influxdb/influxdb/services/hh" - "github.com/influxdb/influxdb/services/httpd" - "github.com/influxdb/influxdb/services/opentsdb" - "github.com/influxdb/influxdb/services/precreator" - "github.com/influxdb/influxdb/services/registration" - "github.com/influxdb/influxdb/services/retention" - "github.com/influxdb/influxdb/services/subscriber" - "github.com/influxdb/influxdb/services/udp" - "github.com/influxdb/influxdb/tsdb" -) - -// Config represents the configuration format for the influxd binary. -type Config struct { - Meta *meta.Config `toml:"meta"` - Data tsdb.Config `toml:"data"` - Cluster cluster.Config `toml:"cluster"` - Retention retention.Config `toml:"retention"` - Registration registration.Config `toml:"registration"` - Precreator precreator.Config `toml:"shard-precreation"` - - Admin admin.Config `toml:"admin"` - Monitor monitor.Config `toml:"monitor"` - Subscriber subscriber.Config `toml:"subscriber"` - HTTPD httpd.Config `toml:"http"` - Graphites []graphite.Config `toml:"graphite"` - Collectd collectd.Config `toml:"collectd"` - OpenTSDB opentsdb.Config `toml:"opentsdb"` - UDPs []udp.Config `toml:"udp"` - - // Snapshot SnapshotConfig `toml:"snapshot"` - ContinuousQuery continuous_querier.Config `toml:"continuous_queries"` - - HintedHandoff hh.Config `toml:"hinted-handoff"` - - // Server reporting - ReportingDisabled bool `toml:"reporting-disabled"` -} - -// NewConfig returns an instance of Config with reasonable defaults. 
-func NewConfig() *Config { - c := &Config{} - c.Meta = meta.NewConfig() - c.Data = tsdb.NewConfig() - c.Cluster = cluster.NewConfig() - c.Registration = registration.NewConfig() - c.Precreator = precreator.NewConfig() - - c.Admin = admin.NewConfig() - c.Monitor = monitor.NewConfig() - c.Subscriber = subscriber.NewConfig() - c.HTTPD = httpd.NewConfig() - c.Collectd = collectd.NewConfig() - c.OpenTSDB = opentsdb.NewConfig() - - c.ContinuousQuery = continuous_querier.NewConfig() - c.Retention = retention.NewConfig() - c.HintedHandoff = hh.NewConfig() - - return c -} - -// NewDemoConfig returns the config that runs when no config is specified. -func NewDemoConfig() (*Config, error) { - c := NewConfig() - - var homeDir string - // By default, store meta and data files in the current user's home directory - u, err := user.Current() - if err == nil { - homeDir = u.HomeDir - } else if os.Getenv("HOME") != "" { - homeDir = os.Getenv("HOME") - } else { - return nil, fmt.Errorf("failed to determine current user for storage") - } - - c.Meta.Dir = filepath.Join(homeDir, ".influxdb/meta") - c.Data.Dir = filepath.Join(homeDir, ".influxdb/data") - c.HintedHandoff.Dir = filepath.Join(homeDir, ".influxdb/hh") - c.Data.WALDir = filepath.Join(homeDir, ".influxdb/wal") - - c.HintedHandoff.Enabled = true - c.Admin.Enabled = true - - return c, nil -} - -// Validate returns an error if the config is invalid. -func (c *Config) Validate() error { - if c.Meta.Dir == "" { - return errors.New("Meta.Dir must be specified") - } else if c.HintedHandoff.Enabled && c.HintedHandoff.Dir == "" { - return errors.New("HintedHandoff.Dir must be specified") - } - - if err := c.Data.Validate(); err != nil { - return err - } - - for _, g := range c.Graphites { - if err := g.Validate(); err != nil { - return fmt.Errorf("invalid graphite config: %v", err) - } - } - return nil -} - -// ApplyEnvOverrides applies the environment configuration on top of the config. -func (c *Config) ApplyEnvOverrides() error { - return c.applyEnvOverrides("INFLUXDB", reflect.ValueOf(c)) -} - -func (c *Config) applyEnvOverrides(prefix string, spec reflect.Value) error { - // If we have a pointer, dereference it - s := spec - if spec.Kind() == reflect.Ptr { - s = spec.Elem() - } - - // Make sure we have a struct - if s.Kind() != reflect.Struct { - return nil - } - - typeOfSpec := s.Type() - for i := 0; i < s.NumField(); i++ { - f := s.Field(i) - // Get the toml tag to determine what env var name to use - configName := typeOfSpec.Field(i).Tag.Get("toml") - // Replace hyphens with underscores to avoid issues with shells - configName = strings.Replace(configName, "-", "_", -1) - fieldKey := typeOfSpec.Field(i).Name - - // Only apply overrides to fields we can set (slices are handled per-element below) - if f.CanSet() || f.Kind() == reflect.Slice { - - // Use the upper-case prefix and toml name for the env var - key := strings.ToUpper(configName) - if prefix != "" { - key = strings.ToUpper(fmt.Sprintf("%s_%s", prefix, configName)) - } - value := os.Getenv(key) - - // If the type is a slice, apply to each using the index as a suffix - // e.g. 
GRAPHITE_0 - if f.Kind() == reflect.Slice || f.Kind() == reflect.Array { - for i := 0; i < f.Len(); i++ { - if err := c.applyEnvOverrides(fmt.Sprintf("%s_%d", key, i), f.Index(i)); err != nil { - return err - } - } - continue - } - - // If it's a sub-config, recursively apply - if f.Kind() == reflect.Struct || f.Kind() == reflect.Ptr { - if err := c.applyEnvOverrides(key, f); err != nil { - return err - } - continue - } - - // Skip any fields we don't have a value to set - if value == "" { - continue - } - - switch f.Kind() { - case reflect.String: - f.SetString(value) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - - var intValue int64 - - // Handle toml.Duration - if f.Type().Name() == "Duration" { - dur, err := time.ParseDuration(value) - if err != nil { - return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldKey, f.Type().String(), value) - } - intValue = dur.Nanoseconds() - } else { - var err error - intValue, err = strconv.ParseInt(value, 0, f.Type().Bits()) - if err != nil { - return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldKey, f.Type().String(), value) - } - } - - f.SetInt(intValue) - case reflect.Bool: - boolValue, err := strconv.ParseBool(value) - if err != nil { - return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldKey, f.Type().String(), value) - - } - f.SetBool(boolValue) - case reflect.Float32, reflect.Float64: - floatValue, err := strconv.ParseFloat(value, f.Type().Bits()) - if err != nil { - return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldKey, f.Type().String(), value) - - } - f.SetFloat(floatValue) - default: - if err := c.applyEnvOverrides(key, f); err != nil { - return err - } - } - } - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_command.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_command.go deleted file mode 100644 index 7bb0d4e5b..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_command.go +++ /dev/null @@ -1,83 +0,0 @@ -package run - -import ( - "flag" - "fmt" - "io" - "os" - - "github.com/BurntSushi/toml" -) - -// PrintConfigCommand represents the command executed by "influxd config". -type PrintConfigCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewPrintConfigCommand return a new instance of PrintConfigCommand. -func NewPrintConfigCommand() *PrintConfigCommand { - return &PrintConfigCommand{ - Stdin: os.Stdin, - Stdout: os.Stdout, - Stderr: os.Stderr, - } -} - -// Run parses and prints the current config loaded. -func (cmd *PrintConfigCommand) Run(args ...string) error { - // Parse command flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - configPath := fs.String("config", "", "") - hostname := fs.String("hostname", "", "") - fs.Usage = func() { fmt.Fprintln(cmd.Stderr, printConfigUsage) } - if err := fs.Parse(args); err != nil { - return err - } - - // Parse config from path. - config, err := cmd.parseConfig(*configPath) - if err != nil { - return fmt.Errorf("parse config: %s", err) - } - - // Apply any environment variables on top of the parsed config - if err := config.ApplyEnvOverrides(); err != nil { - return fmt.Errorf("apply env config: %v", err) - } - - // Override config properties. - if *hostname != "" { - config.Meta.Hostname = *hostname - } - - // Validate the configuration. 
- if err := config.Validate(); err != nil { - return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`", err) - } - - toml.NewEncoder(cmd.Stdout).Encode(config) - fmt.Fprint(cmd.Stdout, "\n") - - return nil -} - -// ParseConfig parses the config at path. -// Returns a demo configuration if path is blank. -func (cmd *PrintConfigCommand) parseConfig(path string) (*Config, error) { - if path == "" { - return NewDemoConfig() - } - - config := NewConfig() - if _, err := toml.DecodeFile(path, &config); err != nil { - return nil, err - } - return config, nil -} - -var printConfigUsage = `usage: config - - config displays the default configuration -` diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_test.go deleted file mode 100644 index c3312ef96..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_test.go +++ /dev/null @@ -1,147 +0,0 @@ -package run_test - -import ( - "os" - "testing" - - "github.com/BurntSushi/toml" - "github.com/influxdb/influxdb/cmd/influxd/run" -) - -// Ensure the configuration can be parsed. -func TestConfig_Parse(t *testing.T) { - // Parse configuration. - var c run.Config - if _, err := toml.Decode(` -[meta] -dir = "/tmp/meta" - -[data] -dir = "/tmp/data" - -[cluster] - -[admin] -bind-address = ":8083" - -[http] -bind-address = ":8087" - -[[graphite]] -protocol = "udp" - -[[graphite]] -protocol = "tcp" - -[collectd] -bind-address = ":1000" - -[opentsdb] -bind-address = ":2000" - -[[udp]] -bind-address = ":4444" - -[monitoring] -enabled = true - -[subscriber] -enabled = true - -[continuous_queries] -enabled = true -`, &c); err != nil { - t.Fatal(err) - } - - // Validate configuration. - if c.Meta.Dir != "/tmp/meta" { - t.Fatalf("unexpected meta dir: %s", c.Meta.Dir) - } else if c.Data.Dir != "/tmp/data" { - t.Fatalf("unexpected data dir: %s", c.Data.Dir) - } else if c.Admin.BindAddress != ":8083" { - t.Fatalf("unexpected admin bind address: %s", c.Admin.BindAddress) - } else if c.HTTPD.BindAddress != ":8087" { - t.Fatalf("unexpected api bind address: %s", c.HTTPD.BindAddress) - } else if len(c.Graphites) != 2 { - t.Fatalf("unexpected graphites count: %d", len(c.Graphites)) - } else if c.Graphites[0].Protocol != "udp" { - t.Fatalf("unexpected graphite protocol(0): %s", c.Graphites[0].Protocol) - } else if c.Graphites[1].Protocol != "tcp" { - t.Fatalf("unexpected graphite protocol(1): %s", c.Graphites[1].Protocol) - } else if c.Collectd.BindAddress != ":1000" { - t.Fatalf("unexpected collectd bind address: %s", c.Collectd.BindAddress) - } else if c.OpenTSDB.BindAddress != ":2000" { - t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDB.BindAddress) - } else if c.UDPs[0].BindAddress != ":4444" { - t.Fatalf("unexpected udp bind address: %s", c.UDPs[0].BindAddress) - } else if c.Subscriber.Enabled != true { - t.Fatalf("unexpected subscriber enabled: %v", c.Subscriber.Enabled) - } else if c.ContinuousQuery.Enabled != true { - t.Fatalf("unexpected continuous query enabled: %v", c.ContinuousQuery.Enabled) - } -} - -// Ensure the configuration can be parsed. -func TestConfig_Parse_EnvOverride(t *testing.T) { - // Parse configuration. 
- var c run.Config - if _, err := toml.Decode(` -[meta] -dir = "/tmp/meta" - -[data] -dir = "/tmp/data" - -[cluster] - -[admin] -bind-address = ":8083" - -[http] -bind-address = ":8087" - -[[graphite]] -protocol = "udp" - -[[graphite]] -protocol = "tcp" - -[collectd] -bind-address = ":1000" - -[opentsdb] -bind-address = ":2000" - -[[udp]] -bind-address = ":4444" - -[monitoring] -enabled = true - -[continuous_queries] -enabled = true -`, &c); err != nil { - t.Fatal(err) - } - - if err := os.Setenv("INFLUXDB_UDP_BIND_ADDRESS", ":1234"); err != nil { - t.Fatalf("failed to set env var: %v", err) - } - - if err := os.Setenv("INFLUXDB_GRAPHITE_1_PROTOCOL", "udp"); err != nil { - t.Fatalf("failed to set env var: %v", err) - } - - if err := c.ApplyEnvOverrides(); err != nil { - t.Fatalf("failed to apply env overrides: %v", err) - } - - if c.UDPs[0].BindAddress != ":4444" { - t.Fatalf("unexpected udp bind address: %s", c.UDPs[0].BindAddress) - } - - if c.Graphites[1].Protocol != "udp" { - t.Fatalf("unexpected graphite protocol(1): %s", c.Graphites[1].Protocol) - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server.go deleted file mode 100644 index e19b29cd4..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server.go +++ /dev/null @@ -1,646 +0,0 @@ -package run - -import ( - "fmt" - "log" - "net" - "os" - "runtime" - "runtime/pprof" - "strings" - "time" - - "github.com/influxdb/enterprise-client/v1" - "github.com/influxdb/influxdb/cluster" - "github.com/influxdb/influxdb/meta" - "github.com/influxdb/influxdb/monitor" - "github.com/influxdb/influxdb/services/admin" - "github.com/influxdb/influxdb/services/collectd" - "github.com/influxdb/influxdb/services/continuous_querier" - "github.com/influxdb/influxdb/services/copier" - "github.com/influxdb/influxdb/services/graphite" - "github.com/influxdb/influxdb/services/hh" - "github.com/influxdb/influxdb/services/httpd" - "github.com/influxdb/influxdb/services/opentsdb" - "github.com/influxdb/influxdb/services/precreator" - "github.com/influxdb/influxdb/services/registration" - "github.com/influxdb/influxdb/services/retention" - "github.com/influxdb/influxdb/services/snapshotter" - "github.com/influxdb/influxdb/services/subscriber" - "github.com/influxdb/influxdb/services/udp" - "github.com/influxdb/influxdb/tcp" - "github.com/influxdb/influxdb/tsdb" - // Initialize the engine packages - _ "github.com/influxdb/influxdb/tsdb/engine" -) - -// BuildInfo represents the build details for the server code. -type BuildInfo struct { - Version string - Commit string - Branch string - Time string -} - -// Server represents a container for the metadata and storage data and services. -// It is built using a Config and it manages the startup and shutdown of all -// services in the proper order. -type Server struct { - buildInfo BuildInfo - - err chan error - closing chan struct{} - - Hostname string - BindAddress string - Listener net.Listener - - MetaStore *meta.Store - TSDBStore *tsdb.Store - QueryExecutor *tsdb.QueryExecutor - PointsWriter *cluster.PointsWriter - ShardWriter *cluster.ShardWriter - ShardMapper *cluster.ShardMapper - HintedHandoff *hh.Service - Subscriber *subscriber.Service - - Services []Service - - // These references are required for the tcp muxer. 
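// (During Open each of these services is handed a dedicated listener carved
// off the shared TCP port, e.g. s.ClusterService.Listener = mux.Listen(cluster.MuxHeader),
// which is why typed references are kept here in addition to the generic
// Services slice.)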
- ClusterService *cluster.Service - SnapshotterService *snapshotter.Service - CopierService *copier.Service - - Monitor *monitor.Monitor - - // Server reporting and registration - reportingDisabled bool - - // Profiling - CPUProfile string - MemProfile string -} - -// NewServer returns a new instance of Server built from a config. -func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) { - // Construct base meta store and data store. - tsdbStore := tsdb.NewStore(c.Data.Dir) - tsdbStore.EngineOptions.Config = c.Data - - s := &Server{ - buildInfo: *buildInfo, - err: make(chan error), - closing: make(chan struct{}), - - Hostname: c.Meta.Hostname, - BindAddress: c.Meta.BindAddress, - - MetaStore: meta.NewStore(c.Meta), - TSDBStore: tsdbStore, - - Monitor: monitor.New(c.Monitor), - - reportingDisabled: c.ReportingDisabled, - } - - // Copy TSDB configuration. - s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine - s.TSDBStore.EngineOptions.MaxWALSize = c.Data.MaxWALSize - s.TSDBStore.EngineOptions.WALFlushInterval = time.Duration(c.Data.WALFlushInterval) - s.TSDBStore.EngineOptions.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay) - - // Set the shard mapper - s.ShardMapper = cluster.NewShardMapper(time.Duration(c.Cluster.ShardMapperTimeout)) - s.ShardMapper.ForceRemoteMapping = c.Cluster.ForceRemoteShardMapping - s.ShardMapper.MetaStore = s.MetaStore - s.ShardMapper.TSDBStore = s.TSDBStore - - // Initialize query executor. - s.QueryExecutor = tsdb.NewQueryExecutor(s.TSDBStore) - s.QueryExecutor.MetaStore = s.MetaStore - s.QueryExecutor.MetaStatementExecutor = &meta.StatementExecutor{Store: s.MetaStore} - s.QueryExecutor.MonitorStatementExecutor = &monitor.StatementExecutor{Monitor: s.Monitor} - s.QueryExecutor.ShardMapper = s.ShardMapper - s.QueryExecutor.QueryLogEnabled = c.Data.QueryLogEnabled - - // Set the shard writer - s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout)) - s.ShardWriter.MetaStore = s.MetaStore - - // Create the hinted handoff service - s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter, s.MetaStore) - s.HintedHandoff.Monitor = s.Monitor - - // Create the Subscriber service - s.Subscriber = subscriber.NewService(c.Subscriber) - s.Subscriber.MetaStore = s.MetaStore - - // Initialize points writer. - s.PointsWriter = cluster.NewPointsWriter() - s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout) - s.PointsWriter.MetaStore = s.MetaStore - s.PointsWriter.TSDBStore = s.TSDBStore - s.PointsWriter.ShardWriter = s.ShardWriter - s.PointsWriter.HintedHandoff = s.HintedHandoff - s.PointsWriter.Subscriber = s.Subscriber - - // needed for executing INTO queries. - s.QueryExecutor.IntoWriter = s.PointsWriter - - // Initialize the monitor - s.Monitor.Version = s.buildInfo.Version - s.Monitor.Commit = s.buildInfo.Commit - s.Monitor.Branch = s.buildInfo.Branch - s.Monitor.BuildTime = s.buildInfo.Time - s.Monitor.MetaStore = s.MetaStore - s.Monitor.PointsWriter = s.PointsWriter - - // Append services. 
- s.appendClusterService(c.Cluster) - s.appendPrecreatorService(c.Precreator) - s.appendRegistrationService(c.Registration) - s.appendSnapshotterService() - s.appendCopierService() - s.appendAdminService(c.Admin) - s.appendContinuousQueryService(c.ContinuousQuery) - s.appendHTTPDService(c.HTTPD) - s.appendCollectdService(c.Collectd) - if err := s.appendOpenTSDBService(c.OpenTSDB); err != nil { - return nil, err - } - for _, g := range c.UDPs { - s.appendUDPService(g) - } - s.appendRetentionPolicyService(c.Retention) - for _, g := range c.Graphites { - if err := s.appendGraphiteService(g); err != nil { - return nil, err - } - } - - return s, nil -} - -func (s *Server) appendClusterService(c cluster.Config) { - srv := cluster.NewService(c) - srv.TSDBStore = s.TSDBStore - srv.MetaStore = s.MetaStore - s.Services = append(s.Services, srv) - s.ClusterService = srv -} - -func (s *Server) appendSnapshotterService() { - srv := snapshotter.NewService() - srv.TSDBStore = s.TSDBStore - srv.MetaStore = s.MetaStore - s.Services = append(s.Services, srv) - s.SnapshotterService = srv -} - -func (s *Server) appendCopierService() { - srv := copier.NewService() - srv.TSDBStore = s.TSDBStore - s.Services = append(s.Services, srv) - s.CopierService = srv -} - -func (s *Server) appendRetentionPolicyService(c retention.Config) { - if !c.Enabled { - return - } - srv := retention.NewService(c) - srv.MetaStore = s.MetaStore - srv.TSDBStore = s.TSDBStore - s.Services = append(s.Services, srv) -} - -func (s *Server) appendAdminService(c admin.Config) { - if !c.Enabled { - return - } - srv := admin.NewService(c) - s.Services = append(s.Services, srv) -} - -func (s *Server) appendHTTPDService(c httpd.Config) { - if !c.Enabled { - return - } - srv := httpd.NewService(c) - srv.Handler.MetaStore = s.MetaStore - srv.Handler.QueryExecutor = s.QueryExecutor - srv.Handler.PointsWriter = s.PointsWriter - srv.Handler.Version = s.buildInfo.Version - - // If a ContinuousQuerier service has been started, attach it. 
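// (This attachment is a type-assertion scan over the services appended above;
// if more than one service satisfied continuous_querier.ContinuousQuerier,
// the last match would win, since the handler field is overwritten on each hit.)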
- for _, srvc := range s.Services { - if cqsrvc, ok := srvc.(continuous_querier.ContinuousQuerier); ok { - srv.Handler.ContinuousQuerier = cqsrvc - } - } - - s.Services = append(s.Services, srv) -} - -func (s *Server) appendCollectdService(c collectd.Config) { - if !c.Enabled { - return - } - srv := collectd.NewService(c) - srv.MetaStore = s.MetaStore - srv.PointsWriter = s.PointsWriter - s.Services = append(s.Services, srv) -} - -func (s *Server) appendOpenTSDBService(c opentsdb.Config) error { - if !c.Enabled { - return nil - } - srv, err := opentsdb.NewService(c) - if err != nil { - return err - } - srv.PointsWriter = s.PointsWriter - srv.MetaStore = s.MetaStore - s.Services = append(s.Services, srv) - return nil -} - -func (s *Server) appendGraphiteService(c graphite.Config) error { - if !c.Enabled { - return nil - } - srv, err := graphite.NewService(c) - if err != nil { - return err - } - - srv.PointsWriter = s.PointsWriter - srv.MetaStore = s.MetaStore - srv.Monitor = s.Monitor - s.Services = append(s.Services, srv) - return nil -} - -func (s *Server) appendPrecreatorService(c precreator.Config) error { - if !c.Enabled { - return nil - } - srv, err := precreator.NewService(c) - if err != nil { - return err - } - - srv.MetaStore = s.MetaStore - s.Services = append(s.Services, srv) - return nil -} - -func (s *Server) appendRegistrationService(c registration.Config) error { - if !c.Enabled { - return nil - } - srv, err := registration.NewService(c, s.buildInfo.Version) - if err != nil { - return err - } - - srv.MetaStore = s.MetaStore - srv.Monitor = s.Monitor - s.Services = append(s.Services, srv) - return nil -} - -func (s *Server) appendUDPService(c udp.Config) { - if !c.Enabled { - return - } - srv := udp.NewService(c) - srv.PointsWriter = s.PointsWriter - srv.MetaStore = s.MetaStore - s.Services = append(s.Services, srv) -} - -func (s *Server) appendContinuousQueryService(c continuous_querier.Config) { - if !c.Enabled { - return - } - srv := continuous_querier.NewService(c) - srv.MetaStore = s.MetaStore - srv.QueryExecutor = s.QueryExecutor - s.Services = append(s.Services, srv) -} - -// Err returns an error channel that multiplexes all out of band errors received from all services. -func (s *Server) Err() <-chan error { return s.err } - -// Open opens the meta and data store and all services. -func (s *Server) Open() error { - if err := func() error { - // Start profiling, if set. - startProfile(s.CPUProfile, s.MemProfile) - - host, port, err := s.hostAddr() - if err != nil { - return err - } - - hostport := net.JoinHostPort(host, port) - addr, err := net.ResolveTCPAddr("tcp", hostport) - if err != nil { - return fmt.Errorf("resolve tcp: addr=%s, err=%s", hostport, err) - } - s.MetaStore.Addr = addr - s.MetaStore.RemoteAddr = &tcpaddr{hostport} - - // Open shared TCP connection. - ln, err := net.Listen("tcp", s.BindAddress) - if err != nil { - return fmt.Errorf("listen: %s", err) - } - s.Listener = ln - - // The port 0 is used, we need to retrieve the port assigned by the kernel - if strings.HasSuffix(s.BindAddress, ":0") { - s.MetaStore.Addr = ln.Addr() - } - - // Multiplex listener. 
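// Every cluster-internal protocol below shares the single TCP port opened
// above: each service registers a distinct one-byte header with the mux and
// gets back its own net.Listener, and the mux routes each inbound connection
// by peeking at its first byte. A minimal sketch of the pattern, assuming
// two distinct header constants and a shared listener sharedLn:
//
//	mux := tcp.NewMux()
//	raftLn := mux.Listen(meta.MuxRaftHeader)   // conns whose first byte matches
//	clusterLn := mux.Listen(cluster.MuxHeader) // a second protocol, same port
//	go mux.Serve(sharedLn)                     // demux the shared listener
//	// raftLn.Accept() now yields only raft connections.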
- mux := tcp.NewMux() - s.MetaStore.RaftListener = mux.Listen(meta.MuxRaftHeader) - s.MetaStore.ExecListener = mux.Listen(meta.MuxExecHeader) - s.MetaStore.RPCListener = mux.Listen(meta.MuxRPCHeader) - - s.ClusterService.Listener = mux.Listen(cluster.MuxHeader) - s.SnapshotterService.Listener = mux.Listen(snapshotter.MuxHeader) - s.CopierService.Listener = mux.Listen(copier.MuxHeader) - go mux.Serve(ln) - - // Open meta store. - if err := s.MetaStore.Open(); err != nil { - return fmt.Errorf("open meta store: %s", err) - } - go s.monitorErrorChan(s.MetaStore.Err()) - - // Wait for the store to initialize. - <-s.MetaStore.Ready() - - // Open TSDB store. - if err := s.TSDBStore.Open(); err != nil { - return fmt.Errorf("open tsdb store: %s", err) - } - - // Open the hinted handoff service - if err := s.HintedHandoff.Open(); err != nil { - return fmt.Errorf("open hinted handoff: %s", err) - } - - // Open the subscriber service - if err := s.Subscriber.Open(); err != nil { - return fmt.Errorf("open subscriber: %s", err) - } - - // Open the points writer service - if err := s.PointsWriter.Open(); err != nil { - return fmt.Errorf("open points writer: %s", err) - } - - // Open the monitor service - if err := s.Monitor.Open(); err != nil { - return fmt.Errorf("open monitor: %v", err) - } - - for _, service := range s.Services { - if err := service.Open(); err != nil { - return fmt.Errorf("open service: %s", err) - } - } - - // Start the reporting service, if not disabled. - if !s.reportingDisabled { - go s.startServerReporting() - } - - return nil - - }(); err != nil { - s.Close() - return err - } - - return nil -} - -// Close shuts down the meta and data stores and all services. -func (s *Server) Close() error { - stopProfile() - - // Close the listener first to stop any new connections - if s.Listener != nil { - s.Listener.Close() - } - - // Close services to allow any in-flight requests to complete - // and prevent new requests from being accepted. - for _, service := range s.Services { - service.Close() - } - - if s.Monitor != nil { - s.Monitor.Close() - } - - if s.PointsWriter != nil { - s.PointsWriter.Close() - } - - if s.HintedHandoff != nil { - s.HintedHandoff.Close() - } - - // Close the TSDBStore, no more reads or writes at this point - if s.TSDBStore != nil { - s.TSDBStore.Close() - } - - if s.Subscriber != nil { - s.Subscriber.Close() - } - - // Finally close the meta-store since everything else depends on it - if s.MetaStore != nil { - s.MetaStore.Close() - } - - close(s.closing) - return nil -} - -// startServerReporting starts periodic server reporting. -func (s *Server) startServerReporting() { - for { - select { - case <-s.closing: - return - default: - } - if err := s.MetaStore.WaitForLeader(30 * time.Second); err != nil { - log.Printf("no leader available for reporting: %s", err.Error()) - time.Sleep(time.Second) - continue - } - s.reportServer() - <-time.After(24 * time.Hour) - } -} - -// reportServer reports anonymous statistics about the system. -func (s *Server) reportServer() { - dis, err := s.MetaStore.Databases() - if err != nil { - log.Printf("failed to retrieve databases for reporting: %s", err.Error()) - return - } - numDatabases := len(dis) - - numMeasurements := 0 - numSeries := 0 - for _, di := range dis { - d := s.TSDBStore.DatabaseIndex(di.Name) - if d == nil { - // No data in this store for this database. 
- continue - } - m, s := d.MeasurementSeriesCounts() - numMeasurements += m - numSeries += s - } - - clusterID, err := s.MetaStore.ClusterID() - if err != nil { - log.Printf("failed to retrieve cluster ID for reporting: %s", err.Error()) - return - } - - cl := client.New("") - usage := client.Usage{ - Product: "influxdb", - Data: []client.UsageData{ - { - Values: client.Values{ - "os": runtime.GOOS, - "arch": runtime.GOARCH, - "version": s.buildInfo.Version, - "server_id": s.MetaStore.NodeID(), - "cluster_id": clusterID, - "num_series": numSeries, - "num_measurements": numMeasurements, - "num_databases": numDatabases, - }, - }, - }, - } - - log.Printf("Sending anonymous usage statistics to m.influxdb.com") - - go cl.Save(usage) -} - -// monitorErrorChan reads an error channel and resends it through the server. -func (s *Server) monitorErrorChan(ch <-chan error) { - for { - select { - case err, ok := <-ch: - if !ok { - return - } - s.err <- err - case <-s.closing: - return - } - } -} - -// hostAddr returns the host and port that remote nodes will use to reach this -// node. -func (s *Server) hostAddr() (string, string, error) { - // Resolve host to address. - _, port, err := net.SplitHostPort(s.BindAddress) - if err != nil { - return "", "", fmt.Errorf("split bind address: %s", err) - } - - host := s.Hostname - - // See if we might have a port that will override the BindAddress port - if host != "" && host[len(host)-1] >= '0' && host[len(host)-1] <= '9' && strings.Contains(host, ":") { - hostArg, portArg, err := net.SplitHostPort(s.Hostname) - if err != nil { - return "", "", err - } - - if hostArg != "" { - host = hostArg - } - - if portArg != "" { - port = portArg - } - } - return host, port, nil -} - -// Service represents a service attached to the server. -type Service interface { - Open() error - Close() error -} - -// prof stores the file locations of active profiles. -var prof struct { - cpu *os.File - mem *os.File -} - -// StartProfile initializes the cpu and memory profile, if specified. -func startProfile(cpuprofile, memprofile string) { - if cpuprofile != "" { - f, err := os.Create(cpuprofile) - if err != nil { - log.Fatalf("cpuprofile: %v", err) - } - log.Printf("writing CPU profile to: %s\n", cpuprofile) - prof.cpu = f - pprof.StartCPUProfile(prof.cpu) - } - - if memprofile != "" { - f, err := os.Create(memprofile) - if err != nil { - log.Fatalf("memprofile: %v", err) - } - log.Printf("writing mem profile to: %s\n", memprofile) - prof.mem = f - runtime.MemProfileRate = 4096 - } - -} - -// StopProfile closes the cpu and memory profiles if they are running. 
-func stopProfile() { - if prof.cpu != nil { - pprof.StopCPUProfile() - prof.cpu.Close() - log.Println("CPU profile stopped") - } - if prof.mem != nil { - pprof.Lookup("heap").WriteTo(prof.mem, 0) - prof.mem.Close() - log.Println("mem profile stopped") - } -} - -type tcpaddr struct{ host string } - -func (a *tcpaddr) Network() string { return "tcp" } -func (a *tcpaddr) String() string { return a.host } diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_helpers_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_helpers_test.go deleted file mode 100644 index 620a48f84..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_helpers_test.go +++ /dev/null @@ -1,377 +0,0 @@ -// This package is a set of convenience helpers and structs to make integration testing easier -package run_test - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "math" - "net/http" - "net/url" - "os" - "regexp" - "strings" - "testing" - "time" - - "github.com/influxdb/influxdb/cmd/influxd/run" - "github.com/influxdb/influxdb/meta" - "github.com/influxdb/influxdb/services/httpd" - "github.com/influxdb/influxdb/toml" -) - -// Server represents a test wrapper for run.Server. -type Server struct { - *run.Server - Config *run.Config -} - -// NewServer returns a new instance of Server. -func NewServer(c *run.Config) *Server { - buildInfo := &run.BuildInfo{ - Version: "testServer", - Commit: "testCommit", - Branch: "testBranch", - } - srv, _ := run.NewServer(c, buildInfo) - s := Server{ - Server: srv, - Config: c, - } - s.TSDBStore.EngineOptions.Config = c.Data - configureLogging(&s) - return &s -} - -// OpenServer opens a test server. -func OpenServer(c *run.Config, joinURLs string) *Server { - s := NewServer(c) - configureLogging(s) - if err := s.Open(); err != nil { - panic(err.Error()) - } - return s -} - -// OpenServerWithVersion opens a test server with a specific version. -func OpenServerWithVersion(c *run.Config, version string) *Server { - buildInfo := &run.BuildInfo{ - Version: version, - Commit: "", - Branch: "", - } - srv, _ := run.NewServer(c, buildInfo) - s := Server{ - Server: srv, - Config: c, - } - configureLogging(&s) - if err := s.Open(); err != nil { - panic(err.Error()) - } - - return &s -} - -// OpenDefaultServer opens a test server with a default database & retention policy. -func OpenDefaultServer(c *run.Config, joinURLs string) *Server { - s := OpenServer(c, joinURLs) - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - panic(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - panic(err) - } - return s -} - -// Close shuts down the server and removes all temporary paths. -func (s *Server) Close() { - s.Server.Close() - os.RemoveAll(s.Config.Meta.Dir) - os.RemoveAll(s.Config.Data.Dir) - os.RemoveAll(s.Config.HintedHandoff.Dir) -} - -// URL returns the base URL for the httpd endpoint. -func (s *Server) URL() string { - for _, service := range s.Services { - if service, ok := service.(*httpd.Service); ok { - return "http://" + service.Addr().String() - } - } - panic("httpd server not found in services") -} - -// CreateDatabaseAndRetentionPolicy will create the database and retention policy. 
-func (s *Server) CreateDatabaseAndRetentionPolicy(db string, rp *meta.RetentionPolicyInfo) error { - if _, err := s.MetaStore.CreateDatabase(db); err != nil { - return err - } else if _, err := s.MetaStore.CreateRetentionPolicy(db, rp); err != nil { - return err - } - return nil -} - -// Query executes a query against the server and returns the results. -func (s *Server) Query(query string) (results string, err error) { - return s.QueryWithParams(query, nil) -} - -// Query executes a query against the server and returns the results. -func (s *Server) QueryWithParams(query string, values url.Values) (results string, err error) { - if values == nil { - values = url.Values{} - } - values.Set("q", query) - return s.HTTPGet(s.URL() + "/query?" + values.Encode()) -} - -// HTTPGet makes an HTTP GET request to the server and returns the response. -func (s *Server) HTTPGet(url string) (results string, err error) { - resp, err := http.Get(url) - if err != nil { - return "", err - } - body := string(MustReadAll(resp.Body)) - switch resp.StatusCode { - case http.StatusBadRequest: - if !expectPattern(".*error parsing query*.", body) { - return "", fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body) - } - return body, nil - case http.StatusOK: - return body, nil - default: - return "", fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body) - } -} - -// HTTPPost makes an HTTP POST request to the server and returns the response. -func (s *Server) HTTPPost(url string, content []byte) (results string, err error) { - buf := bytes.NewBuffer(content) - resp, err := http.Post(url, "application/json", buf) - if err != nil { - return "", err - } - body := string(MustReadAll(resp.Body)) - switch resp.StatusCode { - case http.StatusBadRequest: - if !expectPattern(".*error parsing query*.", body) { - return "", fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body) - } - return body, nil - case http.StatusOK, http.StatusNoContent: - return body, nil - default: - return "", fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body) - } -} - -// Write executes a write against the server and returns the results. -func (s *Server) Write(db, rp, body string, params url.Values) (results string, err error) { - if params == nil { - params = url.Values{} - } - if params.Get("db") == "" { - params.Set("db", db) - } - if params.Get("rp") == "" { - params.Set("rp", rp) - } - resp, err := http.Post(s.URL()+"/write?"+params.Encode(), "", strings.NewReader(body)) - if err != nil { - return "", err - } else if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - return "", fmt.Errorf("invalid status code: code=%d, body=%s", resp.StatusCode, MustReadAll(resp.Body)) - } - return string(MustReadAll(resp.Body)), nil -} - -// MustWrite executes a write to the server. Panic on error. -func (s *Server) MustWrite(db, rp, body string, params url.Values) string { - results, err := s.Write(db, rp, body, params) - if err != nil { - panic(err) - } - return results -} - -// NewConfig returns the default config with temporary paths. 
-func NewConfig() *run.Config { - c := run.NewConfig() - c.ReportingDisabled = true - c.Cluster.ShardWriterTimeout = toml.Duration(30 * time.Second) - c.Cluster.WriteTimeout = toml.Duration(30 * time.Second) - c.Meta.Dir = MustTempFile() - c.Meta.BindAddress = "127.0.0.1:0" - c.Meta.HeartbeatTimeout = toml.Duration(50 * time.Millisecond) - c.Meta.ElectionTimeout = toml.Duration(50 * time.Millisecond) - c.Meta.LeaderLeaseTimeout = toml.Duration(50 * time.Millisecond) - c.Meta.CommitTimeout = toml.Duration(5 * time.Millisecond) - - c.Data.Dir = MustTempFile() - c.Data.WALDir = MustTempFile() - c.Data.WALLoggingEnabled = false - - c.HintedHandoff.Dir = MustTempFile() - - c.HTTPD.Enabled = true - c.HTTPD.BindAddress = "127.0.0.1:0" - c.HTTPD.LogEnabled = testing.Verbose() - - c.Monitor.StoreEnabled = false - - return c -} - -func newRetentionPolicyInfo(name string, rf int, duration time.Duration) *meta.RetentionPolicyInfo { - return &meta.RetentionPolicyInfo{Name: name, ReplicaN: rf, Duration: duration} -} - -func maxFloat64() string { - maxFloat64, _ := json.Marshal(math.MaxFloat64) - return string(maxFloat64) -} - -func maxInt64() string { - maxInt64, _ := json.Marshal(^int64(0)) - return string(maxInt64) -} - -func now() time.Time { - return time.Now().UTC() -} - -func yesterday() time.Time { - return now().Add(-1 * time.Hour * 24) -} - -func mustParseTime(layout, value string) time.Time { - tm, err := time.Parse(layout, value) - if err != nil { - panic(err) - } - return tm -} - -// MustReadAll reads r. Panic on error. -func MustReadAll(r io.Reader) []byte { - b, err := ioutil.ReadAll(r) - if err != nil { - panic(err) - } - return b -} - -// MustTempFile returns a path to a temporary file. -func MustTempFile() string { - f, err := ioutil.TempFile("", "influxd-") - if err != nil { - panic(err) - } - f.Close() - os.Remove(f.Name()) - return f.Name() -} - -func expectPattern(exp, act string) bool { - re := regexp.MustCompile(exp) - if !re.MatchString(act) { - return false - } - return true -} - -type Query struct { - name string - command string - params url.Values - exp, act string - pattern bool - skip bool - repeat int -} - -// Execute runs the command and returns an err if it fails -func (q *Query) Execute(s *Server) (err error) { - if q.params == nil { - q.act, err = s.Query(q.command) - return - } - q.act, err = s.QueryWithParams(q.command, q.params) - return -} - -func (q *Query) success() bool { - if q.pattern { - return expectPattern(q.exp, q.act) - } - return q.exp == q.act -} - -func (q *Query) Error(err error) string { - return fmt.Sprintf("%s: %v", q.name, err) -} - -func (q *Query) failureMessage() string { - return fmt.Sprintf("%s: unexpected results\nquery: %s\nexp: %s\nactual: %s\n", q.name, q.command, q.exp, q.act) -} - -type Test struct { - initialized bool - write string - params url.Values - db string - rp string - exp string - queries []*Query -} - -func NewTest(db, rp string) Test { - return Test{ - db: db, - rp: rp, - } -} - -func (t *Test) addQueries(q ...*Query) { - t.queries = append(t.queries, q...) 
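// A typical table-driven integration test assembles these helpers roughly
// like this (illustrative values only):
//
//	test := NewTest("db0", "rp0")
//	test.write = `cpu,host=serverA val=1.0 946684800000000000`
//	test.addQueries(&Query{
//		name:    "count values",
//		command: `SELECT count(val) FROM cpu`,
//		exp:     `{"results":[{"series":[...]}]}`, // expected JSON body
//		params:  url.Values{"db": []string{"db0"}},
//	})
//	// then: test.init(s) once, and query.Execute(s) per query.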
-} - -func (t *Test) init(s *Server) error { - if t.write == "" || t.initialized { - return nil - } - t.initialized = true - if res, err := s.Write(t.db, t.rp, t.write, t.params); err != nil { - return err - } else if t.exp != res { - return fmt.Errorf("unexpected results\nexp: %s\ngot: %s\n", t.exp, res) - } - return nil -} - -func configureLogging(s *Server) { - // Set the logger to discard unless verbose is on - if !testing.Verbose() { - type logSetter interface { - SetLogger(*log.Logger) - } - nullLogger := log.New(ioutil.Discard, "", 0) - s.MetaStore.Logger = nullLogger - s.TSDBStore.Logger = nullLogger - s.HintedHandoff.SetLogger(nullLogger) - s.Monitor.SetLogger(nullLogger) - s.QueryExecutor.SetLogger(nullLogger) - s.Subscriber.SetLogger(nullLogger) - for _, service := range s.Services { - if service, ok := service.(logSetter); ok { - service.SetLogger(nullLogger) - } - } - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.go deleted file mode 100644 index 3f633019a..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.go +++ /dev/null @@ -1,5338 +0,0 @@ -package run_test - -import ( - "fmt" - "net/http" - "net/url" - "strconv" - "strings" - "testing" - "time" - - "github.com/influxdb/influxdb/cluster" -) - -// Ensure that HTTP responses include the InfluxDB version. -func TestServer_HTTPResponseVersion(t *testing.T) { - version := "v1234" - s := OpenServerWithVersion(NewConfig(), version) - defer s.Close() - - resp, _ := http.Get(s.URL() + "/query") - got := resp.Header.Get("X-Influxdb-Version") - if got != version { - t.Errorf("Server responded with incorrect version, exp %s, got %s", version, got) - } -} - -// Ensure the database commands work. 
-func TestServer_DatabaseCommands(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - test := Test{ - queries: []*Query{ - &Query{ - name: "create database should succeed", - command: `CREATE DATABASE db0`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "create database should error with bad name", - command: `CREATE DATABASE 0xdb0`, - exp: `{"error":"error parsing query: found 0, expected identifier at line 1, char 17"}`, - }, - &Query{ - name: "show database should succeed", - command: `SHOW DATABASES`, - exp: `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db0"]]}]}]}`, - }, - &Query{ - name: "create database should error if it already exists", - command: `CREATE DATABASE db0`, - exp: `{"results":[{"error":"database already exists"}]}`, - }, - &Query{ - name: "create database should not error with existing database with IF NOT EXISTS", - command: `CREATE DATABASE IF NOT EXISTS db0`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "create database should create non-existing database with IF NOT EXISTS", - command: `CREATE DATABASE IF NOT EXISTS db1`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "show database should succeed", - command: `SHOW DATABASES`, - exp: `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db0"],["db1"]]}]}]}`, - }, - &Query{ - name: "drop database db0 should succeed", - command: `DROP DATABASE db0`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "drop database db1 should succeed", - command: `DROP DATABASE db1`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "drop database should error if it does not exists", - command: `DROP DATABASE db1`, - exp: `{"results":[{"error":"database not found: db1"}]}`, - }, - &Query{ - name: "drop database should not error with non-existing database db1 WITH IF EXISTS", - command: `DROP DATABASE IF EXISTS db1`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "show database should have no results", - command: `SHOW DATABASES`, - exp: `{"results":[{"series":[{"name":"databases","columns":["name"]}]}]}`, - }, - &Query{ - name: "drop database should error if it doesn't exist", - command: `DROP DATABASE db0`, - exp: `{"results":[{"error":"database not found: db0"}]}`, - }, - }, - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_DropAndRecreateDatabase(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "Drop database after data write", - command: `DROP DATABASE db0`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "Recreate database", - command: `CREATE DATABASE db0`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "Recreate retention policy", - command: `CREATE RETENTION POLICY rp0 ON db0 DURATION 365d REPLICATION 1 DEFAULT`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: 
"Show measurements after recreate", - command: `SHOW MEASUREMENTS`, - exp: `{"results":[{}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "Query data after recreate", - command: `SELECT * FROM cpu`, - exp: `{"results":[{}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_DropDatabaseIsolated(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicyInfo("rp1", 1, 0)); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "Query data from 1st database", - command: `SELECT * FROM cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "Query data from 1st database with GROUP BY *", - command: `SELECT * FROM cpu GROUP BY *`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"serverA","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:00Z",23.2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "Drop other database", - command: `DROP DATABASE db1`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "Query data from 1st database and ensure it's still there", - command: `SELECT * FROM cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "Query data from 1st database and ensure it's still there with GROUP BY *", - command: `SELECT * FROM cpu GROUP BY *`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"serverA","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:00Z",23.2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) 
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_DropAndRecreateSeries(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "Show series is present", - command: `SHOW SERIES`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=serverA,region=uswest","serverA","uswest"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "Drop series after data write", - command: `DROP SERIES FROM cpu`, - exp: `{"results":[{}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "Show series is gone", - command: `SHOW SERIES`, - exp: `{"results":[{}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } - - // Re-write data and test again. - reTest := NewTest("db0", "rp0") - reTest.write = strings.Join(writes, "\n") - - reTest.addQueries([]*Query{ - &Query{ - name: "Show series is present again after re-write", - command: `SHOW SERIES`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=serverA,region=uswest","serverA","uswest"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) 
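// Note the lazy-init pattern shared by these tests: init(s) runs once, before
// the first query, POSTing the test's write payload to the /write endpoint
// and failing fast if the response body differs from the expected one.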
- - for i, query := range reTest.queries { - if i == 0 { - if err := reTest.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_DropSeriesFromRegex(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`a,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`aa,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`b,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`c,host=serverA,region=uswest val=30.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "Show series is present", - command: `SHOW SERIES`, - exp: `{"results":[{"series":[{"name":"a","columns":["_key","host","region"],"values":[["a,host=serverA,region=uswest","serverA","uswest"]]},{"name":"aa","columns":["_key","host","region"],"values":[["aa,host=serverA,region=uswest","serverA","uswest"]]},{"name":"b","columns":["_key","host","region"],"values":[["b,host=serverA,region=uswest","serverA","uswest"]]},{"name":"c","columns":["_key","host","region"],"values":[["c,host=serverA,region=uswest","serverA","uswest"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "Drop series after data write", - command: `DROP SERIES FROM /a.*/`, - exp: `{"results":[{}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "Show series is gone", - command: `SHOW SERIES`, - exp: `{"results":[{"series":[{"name":"b","columns":["_key","host","region"],"values":[["b,host=serverA,region=uswest","serverA","uswest"]]},{"name":"c","columns":["_key","host","region"],"values":[["c,host=serverA,region=uswest","serverA","uswest"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "Drop series from regex that matches no measurements", - command: `DROP SERIES FROM /a.*/`, - exp: `{"results":[{}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "make sure DROP SERIES doesn't delete anything when regex doesn't match", - command: `SHOW SERIES`, - exp: `{"results":[{"series":[{"name":"b","columns":["_key","host","region"],"values":[["b,host=serverA,region=uswest","serverA","uswest"]]},{"name":"c","columns":["_key","host","region"],"values":[["c,host=serverA,region=uswest","serverA","uswest"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "Drop series with WHERE field should error", - command: `DROP SERIES FROM c WHERE val > 50.0`, - exp: `{"results":[{"error":"DROP SERIES doesn't support fields in WHERE clause"}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "make sure DROP SERIES with field in WHERE didn't delete data", - command: `SHOW SERIES`, - exp: 
`{"results":[{"series":[{"name":"b","columns":["_key","host","region"],"values":[["b,host=serverA,region=uswest","serverA","uswest"]]},{"name":"c","columns":["_key","host","region"],"values":[["c,host=serverA,region=uswest","serverA","uswest"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "Drop series with WHERE time should error", - command: `DROP SERIES FROM c WHERE time > now() - 1d`, - exp: `{"results":[{"error":"DROP SERIES doesn't support time in WHERE clause"}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure retention policy commands work. -func TestServer_RetentionPolicyCommands(t *testing.T) { - t.Parallel() - c := NewConfig() - c.Meta.RetentionAutoCreate = false - s := OpenServer(c, "") - defer s.Close() - - // Create a database. - if _, err := s.MetaStore.CreateDatabase("db0"); err != nil { - t.Fatal(err) - } - - test := Test{ - queries: []*Query{ - &Query{ - name: "create retention policy should succeed", - command: `CREATE RETENTION POLICY rp0 ON db0 DURATION 1h REPLICATION 1`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "create retention policy should error if it already exists", - command: `CREATE RETENTION POLICY rp0 ON db0 DURATION 1h REPLICATION 1`, - exp: `{"results":[{"error":"retention policy already exists"}]}`, - }, - &Query{ - name: "show retention policy should succeed", - command: `SHOW RETENTION POLICIES ON db0`, - exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","1h0m0s",1,false]]}]}]}`, - }, - &Query{ - name: "alter retention policy should succeed", - command: `ALTER RETENTION POLICY rp0 ON db0 DURATION 2h REPLICATION 3 DEFAULT`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "show retention policy should have new altered information", - command: `SHOW RETENTION POLICIES ON db0`, - exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true]]}]}]}`, - }, - &Query{ - name: "dropping default retention policy should not succeed", - command: `DROP RETENTION POLICY rp0 ON db0`, - exp: `{"results":[{"error":"retention policy is default"}]}`, - }, - &Query{ - name: "show retention policy should still show policy", - command: `SHOW RETENTION POLICIES ON db0`, - exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true]]}]}]}`, - }, - &Query{ - name: "create a second non-default retention policy", - command: `CREATE RETENTION POLICY rp2 ON db0 DURATION 1h REPLICATION 1`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "show retention policy should show both", - command: `SHOW RETENTION POLICIES ON db0`, - exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true],["rp2","1h0m0s",1,false]]}]}]}`, - }, - &Query{ - name: "dropping non-default retention policy succeed", - command: `DROP RETENTION POLICY rp2 ON db0`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "show retention policy should show just default", - command: `SHOW RETENTION POLICIES ON db0`, - exp: 
`{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true]]}]}]}`, - }, - &Query{ - name: "Ensure retention policy with unacceptable retention cannot be created", - command: `CREATE RETENTION POLICY rp3 ON db0 DURATION 1s REPLICATION 1`, - exp: `{"results":[{"error":"retention policy duration must be at least 1h0m0s"}]}`, - }, - &Query{ - name: "Check error when deleting retention policy on non-existent database", - command: `DROP RETENTION POLICY rp1 ON mydatabase`, - exp: `{"results":[{"error":"database not found"}]}`, - }, - }, - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the autocreation of retention policy works. -func TestServer_DatabaseRetentionPolicyAutoCreate(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - test := Test{ - queries: []*Query{ - &Query{ - name: "create database should succeed", - command: `CREATE DATABASE db0`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "show retention policies should return auto-created policy", - command: `SHOW RETENTION POLICIES ON db0`, - exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["default","0",1,true]]}]}]}`, - }, - }, - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure user commands work. -func TestServer_UserCommands(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - // Create a database. 
-
-	if _, err := s.MetaStore.CreateDatabase("db0"); err != nil {
-		t.Fatal(err)
-	}
-
-	test := Test{
-		queries: []*Query{
-			&Query{
-				name: "show users, no actual users",
-				command: `SHOW USERS`,
-				exp: `{"results":[{"series":[{"columns":["user","admin"]}]}]}`,
-			},
-			&Query{
-				name: `create user`,
-				command: "CREATE USER jdoe WITH PASSWORD '1337'",
-				exp: `{"results":[{}]}`,
-			},
-			&Query{
-				name: "show users, 1 existing user",
-				command: `SHOW USERS`,
-				exp: `{"results":[{"series":[{"columns":["user","admin"],"values":[["jdoe",false]]}]}]}`,
-			},
-			&Query{
-				name: "grant all privileges to jdoe",
-				command: `GRANT ALL PRIVILEGES TO jdoe`,
-				exp: `{"results":[{}]}`,
-			},
-			&Query{
-				name: "show users, existing user as admin",
-				command: `SHOW USERS`,
-				exp: `{"results":[{"series":[{"columns":["user","admin"],"values":[["jdoe",true]]}]}]}`,
-			},
-			&Query{
-				name: "grant DB privileges to user",
-				command: `GRANT READ ON db0 TO jdoe`,
-				exp: `{"results":[{}]}`,
-			},
-			&Query{
-				name: "revoke all privileges",
-				command: `REVOKE ALL PRIVILEGES FROM jdoe`,
-				exp: `{"results":[{}]}`,
-			},
-			&Query{
-				name: "bad create user request",
-				command: `CREATE USER 0xBAD WITH PASSWORD pwd1337`,
-				exp: `{"error":"error parsing query: found 0, expected identifier at line 1, char 13"}`,
-			},
-			&Query{
-				name: "bad create user request, no name",
-				command: `CREATE USER WITH PASSWORD pwd1337`,
-				exp: `{"error":"error parsing query: found WITH, expected identifier at line 1, char 13"}`,
-			},
-			&Query{
-				name: "bad create user request, no password",
-				command: `CREATE USER jdoe`,
-				exp: `{"error":"error parsing query: found EOF, expected WITH at line 1, char 18"}`,
-			},
-			&Query{
-				name: "drop user",
-				command: `DROP USER jdoe`,
-				exp: `{"results":[{}]}`,
-			},
-			&Query{
-				name: "make sure user was dropped",
-				command: `SHOW USERS`,
-				exp: `{"results":[{"series":[{"columns":["user","admin"]}]}]}`,
-			},
-			&Query{
-				name: "delete non-existent user",
-				command: `DROP USER noone`,
-				exp: `{"results":[{"error":"user not found"}]}`,
-			},
-		},
-	}
-
-	for _, query := range test.queries {
-		if query.skip {
-			t.Logf("SKIP:: %s", query.name)
-			continue
-		}
-		if err := query.Execute(s); err != nil {
-			t.Error(fmt.Sprintf("command: %s - err: %s", query.command, query.Error(err)))
-		} else if !query.success() {
-			t.Error(query.failureMessage())
-		}
-	}
-}
-
-// Ensure the server can create a single point via json protocol and read it back.
-func TestServer_Write_JSON(t *testing.T) {
-	t.Parallel()
-	s := OpenServer(NewConfig(), "")
-	defer s.Close()
-
-	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
-		t.Fatal(err)
-	}
-
-	now := now()
-	if res, err := s.Write("", "", fmt.Sprintf(`{"database" : "db0", "retentionPolicy" : "rp0", "points": [{"measurement": "cpu", "tags": {"host": "server02"},"fields": {"value": 1.0}}],"time":"%s"} `, now.Format(time.RFC3339Nano)), nil); err != nil {
-		t.Fatal(err)
-	} else if exp := ``; exp != res {
-		t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res)
-	}
-
-	// Verify the data was written.
- if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { - t.Fatal(err) - } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server02"},"columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { - t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) - } -} - -// Ensure the server can create a single point via line protocol with float type and read it back. -func TestServer_Write_LineProtocol_Float(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - if res, err := s.Write("db0", "rp0", `cpu,host=server01 value=1.0 `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil { - t.Fatal(err) - } else if exp := ``; exp != res { - t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) - } - - // Verify the data was written. - if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { - t.Fatal(err) - } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { - t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) - } -} - -// Ensure the server can create a single point via line protocol with bool type and read it back. -func TestServer_Write_LineProtocol_Bool(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - if res, err := s.Write("db0", "rp0", `cpu,host=server01 value=true `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil { - t.Fatal(err) - } else if exp := ``; exp != res { - t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) - } - - // Verify the data was written. - if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { - t.Fatal(err) - } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",true]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { - t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) - } -} - -// Ensure the server can create a single point via line protocol with string type and read it back. -func TestServer_Write_LineProtocol_String(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - if res, err := s.Write("db0", "rp0", `cpu,host=server01 value="disk full" `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil { - t.Fatal(err) - } else if exp := ``; exp != res { - t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) - } - - // Verify the data was written. - if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { - t.Fatal(err) - } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s","disk full"]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { - t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) - } -} - -// Ensure the server can create a single point via line protocol with integer type and read it back. 
-func TestServer_Write_LineProtocol_Integer(t *testing.T) {
-	t.Parallel()
-	s := OpenServer(NewConfig(), "")
-	defer s.Close()
-
-	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
-		t.Fatal(err)
-	}
-
-	now := now()
-	if res, err := s.Write("db0", "rp0", `cpu,host=server01 value=100 `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil {
-		t.Fatal(err)
-	} else if exp := ``; exp != res {
-		t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res)
-	}
-
-	// Verify the data was written.
-	if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil {
-		t.Fatal(err)
-	} else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res {
-		t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res)
-	}
-}
-
-// Ensure the server returns a partial write response when some points fail to parse. Also validate that
-// the successfully parsed points can be queried.
-func TestServer_Write_LineProtocol_Partial(t *testing.T) {
-	t.Parallel()
-	s := OpenServer(NewConfig(), "")
-	defer s.Close()
-
-	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
-		t.Fatal(err)
-	}
-
-	now := now()
-	points := []string{
-		"cpu,host=server01 value=100 " + strconv.FormatInt(now.UnixNano(), 10),
-		"cpu,host=server01 value=NaN " + strconv.FormatInt(now.UnixNano(), 10),
-		"cpu,host=server01 value=NaN " + strconv.FormatInt(now.UnixNano(), 10),
-	}
-	if res, err := s.Write("db0", "rp0", strings.Join(points, "\n"), nil); err == nil {
-		t.Fatal("expected partial write error, got nil")
-	} else if exp := ``; exp != res {
-		t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res)
-	} else if exp := "partial write"; !strings.Contains(err.Error(), exp) {
-		t.Fatalf("unexpected error\nexp: %v\ngot: %v", exp, err)
-	}
-
-	// Verify the data was written.
-	if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil {
-		t.Fatal(err)
-	} else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res {
-		t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res)
-	}
-}
-
-// Ensure the server can query with a default database (via query param) and default retention policy.
-func TestServer_Query_DefaultDBAndRP(t *testing.T) {
-	t.Parallel()
-	s := OpenServer(NewConfig(), "")
-	defer s.Close()
-
-	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
-		t.Fatal(err)
-	}
-
-	test := NewTest("db0", "rp0")
-	test.write = fmt.Sprintf(`cpu value=1.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano())
-
-	test.addQueries([]*Query{
-		&Query{
-			name: "default db and rp",
-			params: url.Values{"db": []string{"db0"}},
-			command: `SELECT * FROM cpu GROUP BY *`,
-			exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`,
-		},
-		&Query{
-			name: "default rp exists",
-			command: `show retention policies ON db0`,
-			exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["default","0",1,false],["rp0","1h0m0s",1,true]]}]}]}`,
-		},
-		&Query{
-			name: "default rp",
-			command: `SELECT * FROM db0..cpu GROUP BY *`,
-			exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`,
-		},
-		&Query{
-			name: "default db",
-			params: url.Values{"db": []string{"db0"}},
-			command: `SELECT * FROM rp0.cpu GROUP BY *`,
-			exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`,
-		},
-	}...)
-
-	if err := test.init(s); err != nil {
-		t.Fatalf("test init failed: %s", err)
-	}
-
-	for _, query := range test.queries {
-		if query.skip {
-			t.Logf("SKIP:: %s", query.name)
-			continue
-		}
-		if err := query.Execute(s); err != nil {
-			t.Error(query.Error(err))
-		} else if !query.success() {
-			t.Error(query.failureMessage())
-		}
-	}
-}
-
-// Ensure the server can have a database with multiple measurements.
-func TestServer_Query_Multiple_Measurements(t *testing.T) {
-	t.Parallel()
-	s := OpenServer(NewConfig(), "")
-	defer s.Close()
-
-	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
-		t.Fatal(err)
-	}
-
-	// Make sure we do writes for measurements that will span multiple shards.
-	writes := []string{
-		fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
-		fmt.Sprintf("cpu1,host=server02 value=50,core=2 %d", mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()),
-	}
-	test := NewTest("db0", "rp0")
-	test.write = strings.Join(writes, "\n")
-
-	test.addQueries([]*Query{
-		&Query{
-			name: "measurement in one shard but not another shouldn't panic server",
-			command: `SELECT host,value FROM db0.rp0.cpu`,
-			exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","value"],"values":[["2000-01-01T00:00:00Z","server01",100]]}]}]}`,
-		},
-		&Query{
-			name: "measurement in one shard but not another shouldn't panic server, with GROUP BY host",
-			command: `SELECT host,value FROM db0.rp0.cpu GROUP BY host`,
-			exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",100]]}]}]}`,
-		},
-	}...)
-
-	if err := test.init(s); err != nil {
-		t.Fatalf("test init failed: %s", err)
-	}
-
-	for _, query := range test.queries {
-		if query.skip {
-			t.Logf("SKIP:: %s", query.name)
-			continue
-		}
-		if err := query.Execute(s); err != nil {
-			t.Error(query.Error(err))
-		} else if !query.success() {
-			t.Error(query.failureMessage())
-		}
-	}
-}
-
-// Ensure the server correctly supports data with identical tag values.
-func TestServer_Query_IdenticalTagValues(t *testing.T) {
-	t.Parallel()
-	s := OpenServer(NewConfig(), "")
-	defer s.Close()
-
-	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
-		t.Fatal(err)
-	}
-
-	writes := []string{
-		fmt.Sprintf("cpu,t1=val1 value=1 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
-		fmt.Sprintf("cpu,t2=val2 value=2 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()),
-		fmt.Sprintf("cpu,t1=val2 value=3 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:02:00Z").UnixNano()),
-	}
-	test := NewTest("db0", "rp0")
-	test.write = strings.Join(writes, "\n")
-
-	test.addQueries([]*Query{
-		&Query{
-			name: "measurements with identical tag values - SELECT *, GROUP BY *",
-			command: `SELECT * FROM db0.rp0.cpu GROUP BY *`,
-			exp: `{"results":[{"series":[{"name":"cpu","tags":{"t1":"val1","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"t1":"val2","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:02:00Z",3]]},{"name":"cpu","tags":{"t1":"","t2":"val2"},"columns":["time","value"],"values":[["2000-01-01T00:01:00Z",2]]}]}]}`,
-		},
-		&Query{
-			name: "measurements with identical tag values - SELECT value, GROUP BY t1,t2",
-			command: `SELECT value FROM db0.rp0.cpu GROUP BY t1,t2`,
-			exp: `{"results":[{"series":[{"name":"cpu","tags":{"t1":"val1","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"t1":"val2","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:02:00Z",3]]},{"name":"cpu","tags":{"t1":"","t2":"val2"},"columns":["time","value"],"values":[["2000-01-01T00:01:00Z",2]]}]}]}`,
-		},
-		&Query{
-			name: "measurements with identical tag values - SELECT value, no GROUP BY",
-			command:
`SELECT value FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1],["2000-01-01T00:01:00Z",2],["2000-01-01T00:02:00Z",3]]}]}]}`, - }, - }...) - - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server can handle a query that involves accessing no shards. -func TestServer_Query_NoShards(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - - test := NewTest("db0", "rp0") - test.write = `cpu,host=server01 value=1 ` + strconv.FormatInt(now.UnixNano(), 10) - - test.addQueries([]*Query{ - &Query{ - name: "selecting value should succeed", - command: `SELECT value FROM db0.rp0.cpu WHERE time < now() - 1d`, - exp: `{"results":[{}]}`, - }, - }...) - - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server can query a non-existent field -func TestServer_Query_NonExistent(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - - test := NewTest("db0", "rp0") - test.write = `cpu,host=server01 value=1 ` + strconv.FormatInt(now.UnixNano(), 10) - - test.addQueries([]*Query{ - &Query{ - name: "selecting value should succeed", - command: `SELECT value FROM db0.rp0.cpu`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - &Query{ - name: "selecting non-existent should succeed", - command: `SELECT foo FROM db0.rp0.cpu`, - exp: `{"results":[{}]}`, - }, - }...) 
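All of these tests hand-assemble line-protocol strings: a measurement, optional comma-joined tags, a field set, and a nanosecond timestamp. A small sketch of that format; linePoint is a hypothetical helper, not part of this file:

package main

import (
	"fmt"
	"time"
)

// linePoint renders one line-protocol point, e.g.
// "cpu,host=server01 value=1 946684800000000000".
func linePoint(measurement, tags, fields string, ts time.Time) string {
	if tags != "" {
		measurement += "," + tags
	}
	return fmt.Sprintf("%s %s %d", measurement, fields, ts.UnixNano())
}

func main() {
	t := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)
	fmt.Println(linePoint("cpu", "host=server01", "value=1", t))
	// cpu,host=server01 value=1 946684800000000000
}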
- - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server can perform basic math -func TestServer_Query_Math(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db", newRetentionPolicyInfo("rp", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - writes := []string{ - "float value=42 " + strconv.FormatInt(now.UnixNano(), 10), - "integer value=42i " + strconv.FormatInt(now.UnixNano(), 10), - } - - test := NewTest("db", "rp") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "SELECT multiple of float value", - command: `SELECT value * 2 from db.rp.float`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"float","columns":["time",""],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - &Query{ - name: "SELECT multiple of float value", - command: `SELECT 2 * value from db.rp.float`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"float","columns":["time",""],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - &Query{ - name: "SELECT multiple of integer value", - command: `SELECT value * 2 from db.rp.integer`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time",""],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - &Query{ - name: "SELECT float multiple of integer value", - command: `SELECT value * 2.0 from db.rp.integer`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time",""],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - }...) - - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server can query with the count aggregate function -func TestServer_Query_Count(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - - test := NewTest("db0", "rp0") - test.write = `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10) - - hour_ago := now.Add(-time.Hour).UTC() - - test.addQueries([]*Query{ - &Query{ - name: "selecting count(value) should succeed", - command: `SELECT count(value) FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, - }, - &Query{ - name: "selecting count(value) with where time should return result", - command: fmt.Sprintf(`SELECT count(value) FROM db0.rp0.cpu WHERE time >= '%s'`, hour_ago.Format(time.RFC3339Nano)), - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["%s",1]]}]}]}`, hour_ago.Format(time.RFC3339Nano)), - }, - &Query{ - name: "selecting count(*) should error", - command: `SELECT count(*) FROM db0.rp0.cpu`, - exp: `{"error":"error parsing query: expected field argument in count()"}`, - }, - }...) 
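The Math queries above rely on the engine promoting an integer field to a float when an expression mixes types, which is why `value * 2.0` on the integer measurement still yields 84. The same promotion written out in plain Go, as a rough analogy rather than the server's evaluation code:

package main

import "fmt"

func main() {
	v := int64(42)                // the integer field value written above
	fmt.Println(v * 2)            // 84, integer arithmetic
	fmt.Println(float64(v) * 2.0) // 84, promoted to float64 like value * 2.0
}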
- - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server can query with Now(). -func TestServer_Query_Now(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - - test := NewTest("db0", "rp0") - test.write = `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10) - - test.addQueries([]*Query{ - &Query{ - name: "where with time < now() should work", - command: `SELECT * FROM db0.rp0.cpu where time < now()`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","host","value"],"values":[["%s","server01",1]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - &Query{ - name: "where with time < now() and GROUP BY * should work", - command: `SELECT * FROM db0.rp0.cpu where time < now() GROUP BY *`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - &Query{ - name: "where with time > now() should return an empty result", - command: `SELECT * FROM db0.rp0.cpu where time > now()`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "where with time > now() with GROUP BY * should return an empty result", - command: `SELECT * FROM db0.rp0.cpu where time > now() GROUP BY *`, - exp: `{"results":[{}]}`, - }, - }...) - - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server can query with epoch precisions. 
-func TestServer_Query_EpochPrecision(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - - test := NewTest("db0", "rp0") - test.write = `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10) - - test.addQueries([]*Query{ - &Query{ - name: "nanosecond precision", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - params: url.Values{"epoch": []string{"n"}}, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()), - }, - &Query{ - name: "microsecond precision", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - params: url.Values{"epoch": []string{"u"}}, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Microsecond)), - }, - &Query{ - name: "millisecond precision", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - params: url.Values{"epoch": []string{"ms"}}, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Millisecond)), - }, - &Query{ - name: "second precision", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - params: url.Values{"epoch": []string{"s"}}, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Second)), - }, - &Query{ - name: "minute precision", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - params: url.Values{"epoch": []string{"m"}}, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Minute)), - }, - &Query{ - name: "hour precision", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - params: url.Values{"epoch": []string{"h"}}, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Hour)), - }, - }...) - - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server works with tag queries. 
-func TestServer_Query_Tags(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - - writes := []string{ - fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", now.UnixNano()), - fmt.Sprintf("cpu,host=server02 value=50,core=2 %d", now.Add(1).UnixNano()), - - fmt.Sprintf("cpu1,host=server01,region=us-west value=100 %d", mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - fmt.Sprintf("cpu1,host=server02 value=200 %d", mustParseTime(time.RFC3339Nano, "2010-02-28T01:03:37.703820946Z").UnixNano()), - fmt.Sprintf("cpu1,host=server03 value=300 %d", mustParseTime(time.RFC3339Nano, "2012-02-28T01:03:38.703820946Z").UnixNano()), - - fmt.Sprintf("cpu2,host=server01 value=100 %d", mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - fmt.Sprintf("cpu2 value=200 %d", mustParseTime(time.RFC3339Nano, "2012-02-28T01:03:38.703820946Z").UnixNano()), - - fmt.Sprintf("cpu3,company=acme01 value=100 %d", mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - fmt.Sprintf("cpu3 value=200 %d", mustParseTime(time.RFC3339Nano, "2012-02-28T01:03:38.703820946Z").UnixNano()), - - fmt.Sprintf("status_code,url=http://www.example.com value=404 %d", mustParseTime(time.RFC3339Nano, "2015-07-22T08:13:54.929026672Z").UnixNano()), - fmt.Sprintf("status_code,url=https://influxdb.com value=418 %d", mustParseTime(time.RFC3339Nano, "2015-07-22T09:52:24.914395083Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "tag without field should return error", - command: `SELECT host FROM db0.rp0.cpu`, - exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, - }, - &Query{ - name: "field with tag should succeed", - command: `SELECT host, value FROM db0.rp0.cpu`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","host","value"],"values":[["%s","server01",100],["%s","server02",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - &Query{ - name: "field with tag and GROUP BY should succeed", - command: `SELECT host, value FROM db0.rp0.cpu GROUP BY host`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","value"],"values":[["%s",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - &Query{ - name: "field with two tags should succeed", - command: `SELECT host, value, core FROM db0.rp0.cpu`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","host","value","core"],"values":[["%s","server01",100,4],["%s","server02",50,2]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - &Query{ - name: "field with two tags and GROUP BY should succeed", - command: `SELECT host, value, core FROM db0.rp0.cpu GROUP BY host`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value","core"],"values":[["%s",100,4]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","value","core"],"values":[["%s",50,2]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - &Query{ - name: "select * with tags should succeed", 
- command: `SELECT * FROM db0.rp0.cpu`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","core","host","value"],"values":[["%s",4,"server01",100],["%s",2,"server02",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - &Query{ - name: "select * with tags with GROUP BY * should succeed", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","core","value"],"values":[["%s",4,100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","core","value"],"values":[["%s",2,50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - &Query{ - name: "group by tag", - command: `SELECT value FROM db0.rp0.cpu GROUP by host`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","value"],"values":[["%s",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - &Query{ - name: "single field (EQ tag value1)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01'`, - exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - &Query{ - name: "single field (2 EQ tags)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' AND region = 'us-west'`, - exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - &Query{ - name: "single field (OR different tags)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server03' OR region = 'us-west'`, - exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",300],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - &Query{ - name: "single field (OR with non-existent tag value)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' OR host = 'server66'`, - exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - &Query{ - name: "single field (OR with all tag values)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' OR host = 'server02' OR host = 'server03'`, - exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200],["2012-02-28T01:03:38.703820946Z",300],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - &Query{ - name: "single field (1 EQ and 1 NEQ tag)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' AND region != 'us-west'`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "single field (EQ tag value2)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server02'`, - exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200]]}]}]}`, - }, - &Query{ - name: "single field (NEQ tag value1)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01'`, - exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200],["2012-02-28T01:03:38.703820946Z",300]]}]}]}`, - }, - &Query{ - name: "single field (NEQ tag value1 AND NEQ tag value2)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01' AND host != 'server02'`, - exp: 
`{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",300]]}]}]}`, - }, - &Query{ - name: "single field (NEQ tag value1 OR NEQ tag value2)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01' OR host != 'server02'`, // Yes, this is always true, but that's the point. - exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200],["2012-02-28T01:03:38.703820946Z",300],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - &Query{ - name: "single field (NEQ tag value1 AND NEQ tag value2 AND NEQ tag value3)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01' AND host != 'server02' AND host != 'server03'`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "single field (NEQ tag value1, point without any tags)", - command: `SELECT value FROM db0.rp0.cpu2 WHERE host != 'server01'`, - exp: `{"results":[{"series":[{"name":"cpu2","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",200]]}]}]}`, - }, - &Query{ - name: "single field (NEQ tag value1, point without any tags)", - command: `SELECT value FROM db0.rp0.cpu3 WHERE company !~ /acme01/`, - exp: `{"results":[{"series":[{"name":"cpu3","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",200]]}]}]}`, - }, - &Query{ - name: "single field (regex tag match)", - command: `SELECT value FROM db0.rp0.cpu3 WHERE company =~ /acme01/`, - exp: `{"results":[{"series":[{"name":"cpu3","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - &Query{ - name: "single field (regex tag match)", - command: `SELECT value FROM db0.rp0.cpu3 WHERE company !~ /acme[23]/`, - exp: `{"results":[{"series":[{"name":"cpu3","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",200],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - &Query{ - name: "single field (regex tag match with escaping)", - command: `SELECT value FROM db0.rp0.status_code WHERE url !~ /https\:\/\/influxdb\.com/`, - exp: `{"results":[{"series":[{"name":"status_code","columns":["time","value"],"values":[["2015-07-22T08:13:54.929026672Z",404]]}]}]}`, - }, - &Query{ - name: "single field (regex tag match with escaping)", - command: `SELECT value FROM db0.rp0.status_code WHERE url =~ /https\:\/\/influxdb\.com/`, - exp: `{"results":[{"series":[{"name":"status_code","columns":["time","value"],"values":[["2015-07-22T09:52:24.914395083Z",418]]}]}]}`, - }, - }...) - - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server correctly queries with an alias. 
-func TestServer_Query_Alias(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf("cpu value=1i,steps=3i %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf("cpu value=2i,steps=4i %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), - } - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "baseline query - SELECT * FROM db0.rp0.cpu", - command: `SELECT * FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","steps","value"],"values":[["2000-01-01T00:00:00Z",3,1],["2000-01-01T00:01:00Z",4,2]]}]}]}`, - }, - &Query{ - name: "basic query with alias - SELECT steps, value as v FROM db0.rp0.cpu", - command: `SELECT steps, value as v FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","steps","v"],"values":[["2000-01-01T00:00:00Z",3,1],["2000-01-01T00:01:00Z",4,2]]}]}]}`, - }, - &Query{ - name: "double aggregate sum - SELECT sum(value), sum(steps) FROM db0.rp0.cpu", - command: `SELECT sum(value), sum(steps) FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum","sum"],"values":[["1970-01-01T00:00:00Z",3,7]]}]}]}`, - }, - &Query{ - name: "double aggregate sum reverse order - SELECT sum(steps), sum(value) FROM db0.rp0.cpu", - command: `SELECT sum(steps), sum(value) FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum","sum"],"values":[["1970-01-01T00:00:00Z",7,3]]}]}]}`, - }, - &Query{ - name: "double aggregate sum with alias - SELECT sum(value) as sumv, sum(steps) as sums FROM db0.rp0.cpu", - command: `SELECT sum(value) as sumv, sum(steps) as sums FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sumv","sums"],"values":[["1970-01-01T00:00:00Z",3,7]]}]}]}`, - }, - &Query{ - name: "double aggregate with same value - SELECT sum(value), mean(value) FROM db0.rp0.cpu", - command: `SELECT sum(value), mean(value) FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",3,1.5]]}]}]}`, - }, - &Query{ - name: "double aggregate with same value and same alias - SELECT mean(value) as mv, max(value) as mv FROM db0.rp0.cpu", - command: `SELECT mean(value) as mv, max(value) as mv FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","mv","mv"],"values":[["1970-01-01T00:00:00Z",1.5,2]]}]}]}`, - }, - &Query{ - name: "double aggregate with non-existent field - SELECT mean(value), max(foo) FROM db0.rp0.cpu", - command: `SELECT mean(value), max(foo) FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","mean","max"],"values":[["1970-01-01T00:00:00Z",1.5,null]]}]}]}`, - }, - }...) - - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server will succeed and error for common scenarios. 
-func TestServer_Query_Common(t *testing.T) {
-	t.Parallel()
-	s := OpenServer(NewConfig(), "")
-	defer s.Close()
-
-	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
-		t.Fatal(err)
-	}
-
-	now := now()
-
-	test := NewTest("db0", "rp0")
-	test.write = fmt.Sprintf("cpu,host=server01 value=1 %s", strconv.FormatInt(now.UnixNano(), 10))
-
-	test.addQueries([]*Query{
-		&Query{
-			name: "selecting from a non-existent database should error",
-			command: `SELECT value FROM db1.rp0.cpu`,
-			exp: `{"results":[{"error":"database not found: db1"}]}`,
-		},
-		&Query{
-			name: "selecting from a non-existent retention policy should error",
-			command: `SELECT value FROM db0.rp1.cpu`,
-			exp: `{"results":[{"error":"retention policy not found"}]}`,
-		},
-		&Query{
-			name: "selecting a valid measurement and field should succeed",
-			command: `SELECT value FROM db0.rp0.cpu`,
-			exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)),
-		},
-		&Query{
-			name: "explicitly selecting time and a valid measurement and field should succeed",
-			command: `SELECT time,value FROM db0.rp0.cpu`,
-			exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)),
-		},
-		&Query{
-			name: "selecting a measurement that doesn't exist should result in empty set",
-			command: `SELECT value FROM db0.rp0.idontexist`,
-			exp: `{"results":[{}]}`,
-		},
-		&Query{
-			name: "selecting a field that doesn't exist should result in empty set",
-			command: `SELECT idontexist FROM db0.rp0.cpu`,
-			exp: `{"results":[{}]}`,
-		},
-		&Query{
-			name: "selecting wildcard without specifying a database should error",
-			command: `SELECT * FROM cpu`,
-			exp: `{"results":[{"error":"database name required"}]}`,
-		},
-		&Query{
-			name: "selecting explicit field without specifying a database should error",
-			command: `SELECT value FROM cpu`,
-			exp: `{"results":[{"error":"database name required"}]}`,
-		},
-	}...)
-
-	if err := test.init(s); err != nil {
-		t.Fatalf("test init failed: %s", err)
-	}
-
-	for _, query := range test.queries {
-		if query.skip {
-			t.Logf("SKIP:: %s", query.name)
-			continue
-		}
-		if err := query.Execute(s); err != nil {
-			t.Error(query.Error(err))
-		} else if !query.success() {
-			t.Error(query.failureMessage())
-		}
-	}
-}
-
-// Ensure the server can query two points.
-func TestServer_Query_SelectTwoPoints(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - - test := NewTest("db0", "rp0") - test.write = fmt.Sprintf("cpu value=100 %s\ncpu value=200 %s", strconv.FormatInt(now.UnixNano(), 10), strconv.FormatInt(now.Add(1).UnixNano(), 10)) - - test.addQueries( - &Query{ - name: "selecting two points should result in two points", - command: `SELECT * FROM db0.rp0.cpu`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",100],["%s",200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - &Query{ - name: "selecting two points with GROUP BY * should result in two points", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",100],["%s",200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - ) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server can query two negative points. -func TestServer_Query_SelectTwoNegativePoints(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - - test := NewTest("db0", "rp0") - test.write = fmt.Sprintf("cpu value=-100 %s\ncpu value=-200 %s", strconv.FormatInt(now.UnixNano(), 10), strconv.FormatInt(now.Add(1).UnixNano(), 10)) - - test.addQueries(&Query{ - name: "selecting two negative points should succeed", - command: `SELECT * FROM db0.rp0.cpu`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",-100],["%s",-200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server can query with relative time. 
-func TestServer_Query_SelectRelativeTime(t *testing.T) {
-	t.Parallel()
-	s := OpenServer(NewConfig(), "")
-	defer s.Close()
-
-	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
-		t.Fatal(err)
-	}
-
-	now := now()
-	yesterday := yesterday()
-
-	test := NewTest("db0", "rp0")
-	test.write = fmt.Sprintf("cpu,host=server01 value=100 %s\ncpu,host=server01 value=200 %s", strconv.FormatInt(yesterday.UnixNano(), 10), strconv.FormatInt(now.UnixNano(), 10))
-
-	test.addQueries([]*Query{
-		&Query{
-			name: "single point with time pre-calculated for past time queries yesterday",
-			command: `SELECT * FROM db0.rp0.cpu where time >= '` + yesterday.Add(-1*time.Minute).Format(time.RFC3339Nano) + `' GROUP BY *`,
-			exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100],["%s",200]]}]}]}`, yesterday.Format(time.RFC3339Nano), now.Format(time.RFC3339Nano)),
-		},
-		&Query{
-			name: "single point with time pre-calculated for relative time queries now",
-			command: `SELECT * FROM db0.rp0.cpu where time >= now() - 1m GROUP BY *`,
-			exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",200]]}]}]}`, now.Format(time.RFC3339Nano)),
-		},
-	}...)
-
-	for i, query := range test.queries {
-		if i == 0 {
-			if err := test.init(s); err != nil {
-				t.Fatalf("test init failed: %s", err)
-			}
-		}
-		if query.skip {
-			t.Logf("SKIP:: %s", query.name)
-			continue
-		}
-		if err := query.Execute(s); err != nil {
-			t.Error(query.Error(err))
-		} else if !query.success() {
-			t.Error(query.failureMessage())
-		}
-	}
-}
-
-// Ensure the server can handle various simple calculus queries.
-func TestServer_Query_SelectRawCalculus(t *testing.T) {
-	t.Parallel()
-	s := OpenServer(NewConfig(), "")
-	defer s.Close()
-
-	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
-		t.Fatal(err)
-	}
-
-	test := NewTest("db0", "rp0")
-	test.write = "cpu value=210 1278010021000000000\ncpu value=10 1278010022000000000"
-
-	test.addQueries([]*Query{
-		&Query{
-			name: "calculate single derivative",
-			command: `SELECT derivative(value) from db0.rp0.cpu`,
-			exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-200]]}]}]}`,
-		},
-	}...)
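The expected derivative follows directly from the two points written above: the value falls from 210 to 10 over exactly one second, so derivative(value) is (10 - 210) / 1s = -200. A tiny sketch of that arithmetic:

package main

import (
	"fmt"
	"time"
)

func main() {
	// The two points above: 210 at t0 and 10 at t1, one second apart.
	v0, v1 := 210.0, 10.0
	t0, t1 := int64(1278010021000000000), int64(1278010022000000000)
	dt := float64(t1-t0) / float64(time.Second)
	fmt.Println((v1 - v0) / dt) // -200, the expected derivative per second
}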
-
-	for i, query := range test.queries {
-		if i == 0 {
-			if err := test.init(s); err != nil {
-				t.Fatalf("test init failed: %s", err)
-			}
-		}
-		if query.skip {
-			t.Logf("SKIP:: %s", query.name)
-			continue
-		}
-		if err := query.Execute(s); err != nil {
-			t.Error(query.Error(err))
-		} else if !query.success() {
-			t.Error(query.failureMessage())
-		}
-	}
-}
-
-// TestServer_Query_MergeMany ensures that when many series are merged and some of them have a
-// different number of points than others in a GROUP BY interval, the results are correct.
-func TestServer_Query_MergeMany(t *testing.T) {
-	t.Parallel()
-	s := OpenServer(NewConfig(), "")
-	defer s.Close()
-
-	// set infinite retention policy as we are inserting data in the past and don't want retention policy enforcement to make this test racy
-	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
-		t.Fatal(err)
-	}
-
-	test := NewTest("db0", "rp0")
-
-	writes := []string{}
-	for i := 1; i < 11; i++ {
-		for j := 1; j < 5+i%3; j++ {
-			data := fmt.Sprintf(`cpu,host=server_%d value=22 %d`, i, time.Unix(int64(j), int64(0)).UTC().UnixNano())
-			writes = append(writes, data)
-		}
-	}
-	test.write = strings.Join(writes, "\n")
-
-	test.addQueries([]*Query{
-		&Query{
-			name: "GROUP by time",
-			command: `SELECT count(value) FROM db0.rp0.cpu WHERE time >= '1970-01-01T00:00:01Z' AND time <= '1970-01-01T00:00:06Z' GROUP BY time(1s)`,
-			exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:01Z",10],["1970-01-01T00:00:02Z",10],["1970-01-01T00:00:03Z",10],["1970-01-01T00:00:04Z",10],["1970-01-01T00:00:05Z",7],["1970-01-01T00:00:06Z",3]]}]}]}`,
-		},
-		&Query{
-			skip: true,
-			name: "GROUP by tag - FIXME issue #2875",
-			command: `SELECT count(value) FROM db0.rp0.cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:00Z' group by host`,
-			exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server03"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`,
-		},
-		&Query{
-			name: "GROUP by field",
-			command: `SELECT count(value) FROM db0.rp0.cpu group by value`,
-			exp: `{"results":[{"error":"can not use field in GROUP BY clause: value"}]}`,
-		},
-	}...)
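The expected GROUP BY time(1s) counts come straight from the nested write loop: series i has points at t=1..4+i%3 seconds, so seconds 1-4 see all ten series, second 5 sees the seven series with i%3 >= 1, and second 6 the three with i%3 == 2. A sketch that reproduces the counts:

package main

import "fmt"

func main() {
	// Mirrors the write loop above: series i (1..10) gets points at
	// t = 1 .. 4+i%3 seconds.
	counts := map[int]int{}
	for i := 1; i < 11; i++ {
		for j := 1; j < 5+i%3; j++ {
			counts[j]++
		}
	}
	for t := 1; t <= 6; t++ {
		fmt.Println(t, counts[t]) // 10, 10, 10, 10, 7, 3 - as in exp above
	}
}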
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_SLimitAndSOffset(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - // set infinite retention policy as we are inserting data in the past and don't want retention policy enforcement to make this test racy - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - - test := NewTest("db0", "rp0") - - writes := []string{} - for i := 1; i < 10; i++ { - data := fmt.Sprintf(`cpu,region=us-east,host=server-%d value=%d %d`, i, i, time.Unix(int64(i), int64(0)).UnixNano()) - writes = append(writes, data) - } - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "SLIMIT 2 SOFFSET 1", - command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 2 SOFFSET 1`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-2","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-3","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, - }, - &Query{ - name: "SLIMIT 2 SOFFSET 3", - command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 2 SOFFSET 3`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-4","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-5","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, - }, - &Query{ - name: "SLIMIT 3 SOFFSET 8", - command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 3 SOFFSET 8`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-9","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, - }, - }...) 
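SLIMIT and SOFFSET page over whole series rather than points, so with the nine series server-1 through server-9 written above, the expectations are simple slices of the ordered series list. A sketch of the equivalent slicing:

package main

import "fmt"

func main() {
	// Nine series exist (server-1..server-9); SLIMIT 2 SOFFSET 1 behaves
	// like the slice series[1:3], i.e. server-2 and server-3.
	var series []string
	for i := 1; i < 10; i++ {
		series = append(series, fmt.Sprintf("server-%d", i))
	}
	offset, limit := 1, 2
	fmt.Println(series[offset : offset+limit]) // [server-2 server-3]
}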
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Regex(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu1,host=server01 value=10 %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - fmt.Sprintf(`cpu2,host=server01 value=20 %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - fmt.Sprintf(`cpu3,host=server01 value=30 %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "default db and rp", - command: `SELECT * FROM /cpu[13]/`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","host","value"],"values":[["2015-02-28T01:03:36.703820946Z","server01",10]]},{"name":"cpu3","columns":["time","host","value"],"values":[["2015-02-28T01:03:36.703820946Z","server01",30]]}]}]}`, - }, - &Query{ - name: "default db and rp with GROUP BY *", - command: `SELECT * FROM /cpu[13]/ GROUP BY *`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, - }, - &Query{ - name: "specifying db and rp", - command: `SELECT * FROM db0.rp0./cpu[13]/ GROUP BY *`, - exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, - }, - &Query{ - name: "default db and specified rp", - command: `SELECT * FROM rp0./cpu[13]/ GROUP BY *`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, - }, - &Query{ - name: "specified db and default rp", - command: `SELECT * FROM db0../cpu[13]/ GROUP BY *`, - exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, - }, - }...) 
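The measurement regex /cpu[13]/ used above matches cpu1 and cpu3 but not cpu2, which is why cpu2 never appears in the expected results. The same match expressed with Go's regexp package, as a rough analogy rather than the server's matching code:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// /cpu[13]/ selects measurements whose name matches the pattern.
	re := regexp.MustCompile(`cpu[13]`)
	for _, m := range []string{"cpu1", "cpu2", "cpu3"} {
		fmt.Println(m, re.MatchString(m)) // cpu1 true, cpu2 false, cpu3 true
	}
}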
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Aggregates_Int(t *testing.T) { - t.Parallel() - s := OpenDefaultServer(NewConfig(), "") - defer s.Close() - - test := NewTest("db0", "rp0") - test.write = strings.Join([]string{ - fmt.Sprintf(`int value=45 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - }, "\n") - - test.addQueries([]*Query{ - // int64 - &Query{ - name: "stddev with just one point - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT STDDEV(value) FROM int`, - exp: `{"results":[{"series":[{"name":"int","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Aggregates_IntMax(t *testing.T) { - t.Parallel() - s := OpenDefaultServer(NewConfig(), "") - defer s.Close() - - test := NewTest("db0", "rp0") - test.write = strings.Join([]string{ - fmt.Sprintf(`intmax value=%s %d`, maxInt64(), mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`intmax value=%s %d`, maxInt64(), mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), - }, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "large mean and stddev - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEAN(value), STDDEV(value) FROM intmax`, - exp: `{"results":[{"series":[{"name":"intmax","columns":["time","mean","stddev"],"values":[["1970-01-01T00:00:00Z",` + maxInt64() + `,0]]}]}]}`, - }, - }...) 
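- // Both points carry maxInt64(), so the expected mean equals the input value and the stddev is 0, exercising the aggregates at the upper edge of the int64 range.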
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Aggregates_IntMany(t *testing.T) { - t.Parallel() - s := OpenDefaultServer(NewConfig(), "") - defer s.Close() - - test := NewTest("db0", "rp0") - test.write = strings.Join([]string{ - fmt.Sprintf(`intmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`intmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`intmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - fmt.Sprintf(`intmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), - fmt.Sprintf(`intmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), - fmt.Sprintf(`intmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), - fmt.Sprintf(`intmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), - fmt.Sprintf(`intmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), - }, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "mean and stddev - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEAN(value), STDDEV(value) FROM intmany WHERE time >= '2000-01-01' AND time < '2000-01-01T00:02:00Z' GROUP BY time(10m)`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","mean","stddev"],"values":[["2000-01-01T00:00:00Z",5,2.138089935299395]]}]}]}`, - }, - &Query{ - name: "first - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT FIRST(value) FROM intmany`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, - }, - &Query{ - name: "first - int - epoch ms", - params: url.Values{"db": []string{"db0"}, "epoch": []string{"ms"}}, - command: `SELECT FIRST(value) FROM intmany`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[[%d,2]]}]}]}`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()/int64(time.Millisecond)), - }, - &Query{ - name: "last - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT LAST(value) FROM intmany`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","last"],"values":[["2000-01-01T00:01:10Z",9]]}]}]}`, - }, - &Query{ - name: "spread - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SPREAD(value) FROM intmany`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","spread"],"values":[["1970-01-01T00:00:00Z",7]]}]}]}`, - }, - &Query{ - name: "median - even count - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEDIAN(value) FROM intmany`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4.5]]}]}]}`, - }, - &Query{ - name: "median - odd count - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEDIAN(value) FROM intmany where time < '2000-01-01T00:01:10Z'`, - exp: 
`{"results":[{"series":[{"name":"intmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`, - }, - &Query{ - name: "distinct as call - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT(value) FROM intmany`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",[2,4,5,7,9]]]}]}]}`, - }, - &Query{ - name: "distinct alt syntax - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT value FROM intmany`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",[2,4,5,7,9]]]}]}]}`, - }, - &Query{ - name: "distinct select tag - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT(host) FROM intmany`, - exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, - }, - &Query{ - name: "distinct alt select tag - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT host FROM intmany`, - exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, - }, - &Query{ - name: "count distinct - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT value) FROM intmany`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, - }, - &Query{ - name: "count distinct as call - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT(value)) FROM intmany`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, - }, - &Query{ - name: "count distinct select tag - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT host) FROM intmany`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, - }, - &Query{ - name: "count distinct as call select tag - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT host) FROM intmany`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, - }, - }...) 
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Aggregates_IntMany_GroupBy(t *testing.T) { - t.Parallel() - s := OpenDefaultServer(NewConfig(), "") - defer s.Close() - - test := NewTest("db0", "rp0") - test.write = strings.Join([]string{ - fmt.Sprintf(`intmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`intmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`intmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - fmt.Sprintf(`intmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), - fmt.Sprintf(`intmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), - fmt.Sprintf(`intmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), - fmt.Sprintf(`intmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), - fmt.Sprintf(`intmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), - }, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "max order by time with time specified group by 10s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(10s)`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:40Z",5],["2000-01-01T00:00:50Z",5],["2000-01-01T00:01:00Z",7],["2000-01-01T00:01:10Z",9]]}]}]}`, - }, - &Query{ - name: "max order by time without time specified group by 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:30Z",5],["2000-01-01T00:01:00Z",9]]}]}]}`, - }, - &Query{ - name: "max order by time with time specified group by 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:40Z",5],["2000-01-01T00:01:10Z",9]]}]}]}`, - }, - &Query{ - name: "min order by time without time specified group by 15s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT min(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`, - }, - &Query{ - name: "min order by time with 
time specified group by 15s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, min(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:50Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`, - }, - &Query{ - name: "first order by time without time specified group by 15s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT first(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`, - }, - &Query{ - name: "first order by time with time specified group by 15s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, first(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:50Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`, - }, - &Query{ - name: "last order by time without time specified group by 15s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT last(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","last"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",5],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",9]]}]}]}`, - }, - &Query{ - name: "last order by time with time specified group by 15s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, last(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","last"],"values":[["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:40Z",5],["2000-01-01T00:00:50Z",5],["2000-01-01T00:01:10Z",9]]}]}]}`, - }, - }...) 
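- // For the selector functions above, selecting time explicitly shifts the reported timestamp from the start of each GROUP BY bucket to the timestamp of the selected point itself.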
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Aggregates_IntMany_OrderByDesc(t *testing.T) { - t.Parallel() - s := OpenDefaultServer(NewConfig(), "") - defer s.Close() - - test := NewTest("db0", "rp0") - test.write = strings.Join([]string{ - fmt.Sprintf(`intmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`intmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`intmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - fmt.Sprintf(`intmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), - fmt.Sprintf(`intmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), - fmt.Sprintf(`intmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), - fmt.Sprintf(`intmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), - fmt.Sprintf(`intmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), - }, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "aggregate order by time desc", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:00Z' group by time(10s) order by time desc`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:01:00Z",7],["2000-01-01T00:00:50Z",5],["2000-01-01T00:00:40Z",5],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:00Z",2]]}]}]}`, - }, - }...) 
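- // ORDER BY time desc reverses the bucket order of the grouped aggregate; the values match the ascending form, only emitted newest first.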
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Aggregates_IntOverlap(t *testing.T) { - t.Parallel() - s := OpenDefaultServer(NewConfig(), "") - defer s.Close() - - test := NewTest("db0", "rp0") - test.write = strings.Join([]string{ - fmt.Sprintf(`intoverlap,region=us-east value=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`intoverlap,region=us-east value=30 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`intoverlap,region=us-west value=100 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`intoverlap,region=us-east otherVal=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), - }, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "aggregation with no interval - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT count(value) FROM intoverlap WHERE time = '2000-01-01 00:00:00'`, - exp: `{"results":[{"series":[{"name":"intoverlap","columns":["time","count"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, - }, - &Query{ - name: "sum - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM intoverlap WHERE time >= '2000-01-01 00:00:05' AND time <= '2000-01-01T00:00:10Z' GROUP BY time(10s), region`, - exp: `{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",null],["2000-01-01T00:00:10Z",30]]}]}]}`, - }, - &Query{ - name: "aggregation with a null field value - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM intoverlap GROUP BY region`, - exp: `{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",50]]},{"name":"intoverlap","tags":{"region":"us-west"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",100]]}]}]}`, - }, - &Query{ - name: "multiple aggregations - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value), MEAN(value) FROM intoverlap GROUP BY region`, - exp: `{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",50,25]]},{"name":"intoverlap","tags":{"region":"us-west"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",100,100]]}]}]}`, - }, - &Query{ - skip: true, - name: "multiple aggregations with division - int FIXME issue #2879", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value), mean(value), sum(value) / mean(value) as div FROM intoverlap GROUP BY region`, - exp: `{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum","mean","div"],"values":[["1970-01-01T00:00:00Z",50,25,2]]},{"name":"intoverlap","tags":{"region":"us-west"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",100,100,1]]}]}]}`, - }, - }...) 
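- // The otherVal point contributes nothing to the summed field, so the us-east sum stays at 50 (20+30) in the null-field-value case above.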
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Aggregates_FloatSingle(t *testing.T) { - t.Parallel() - s := OpenDefaultServer(NewConfig(), "") - defer s.Close() - - test := NewTest("db0", "rp0") - test.write = strings.Join([]string{ - fmt.Sprintf(`floatsingle value=45.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - }, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "stddev with just one point - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT STDDEV(value) FROM floatsingle`, - exp: `{"results":[{"series":[{"name":"floatsingle","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Aggregates_FloatMany(t *testing.T) { - t.Parallel() - s := OpenDefaultServer(NewConfig(), "") - defer s.Close() - - test := NewTest("db0", "rp0") - test.write = strings.Join([]string{ - fmt.Sprintf(`floatmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`floatmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`floatmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - fmt.Sprintf(`floatmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), - fmt.Sprintf(`floatmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), - fmt.Sprintf(`floatmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), - fmt.Sprintf(`floatmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), - fmt.Sprintf(`floatmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), - }, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "mean and stddev - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEAN(value), STDDEV(value) FROM floatmany WHERE time >= '2000-01-01' AND time < '2000-01-01T00:02:00Z' GROUP BY time(10m)`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","mean","stddev"],"values":[["2000-01-01T00:00:00Z",5,2.138089935299395]]}]}]}`, - }, - &Query{ - name: "first - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT FIRST(value) FROM floatmany`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, - }, - &Query{ - name: "last - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT LAST(value) FROM floatmany`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","last"],"values":[["2000-01-01T00:01:10Z",9]]}]}]}`, - }, - &Query{ - name: "spread - float", - params: 
url.Values{"db": []string{"db0"}}, - command: `SELECT SPREAD(value) FROM floatmany`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","spread"],"values":[["1970-01-01T00:00:00Z",7]]}]}]}`, - }, - &Query{ - name: "median - even count - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEDIAN(value) FROM floatmany`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4.5]]}]}]}`, - }, - &Query{ - name: "median - odd count - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEDIAN(value) FROM floatmany where time < '2000-01-01T00:01:10Z'`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`, - }, - &Query{ - name: "distinct as call - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT(value) FROM floatmany`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",[2,4,5,7,9]]]}]}]}`, - }, - &Query{ - name: "distinct alt syntax - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT value FROM floatmany`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",[2,4,5,7,9]]]}]}]}`, - }, - &Query{ - name: "distinct select tag - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT(host) FROM floatmany`, - exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, - }, - &Query{ - name: "distinct alt select tag - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT host FROM floatmany`, - exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, - }, - &Query{ - name: "count distinct - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT value) FROM floatmany`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, - }, - &Query{ - name: "count distinct as call - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT(value)) FROM floatmany`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, - }, - &Query{ - name: "count distinct select tag - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT host) FROM floatmany`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, - }, - &Query{ - name: "count distinct as call select tag - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT host) FROM floatmany`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, - }, - }...) 
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Aggregates_FloatOverlap(t *testing.T) { - t.Parallel() - s := OpenDefaultServer(NewConfig(), "") - defer s.Close() - - test := NewTest("db0", "rp0") - test.write = strings.Join([]string{ - fmt.Sprintf(`floatoverlap,region=us-east value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`floatoverlap,region=us-east value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`floatoverlap,region=us-west value=100.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`floatoverlap,region=us-east otherVal=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), - }, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "aggregation with no interval - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT count(value) FROM floatoverlap WHERE time = '2000-01-01 00:00:00'`, - exp: `{"results":[{"series":[{"name":"floatoverlap","columns":["time","count"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, - }, - &Query{ - name: "sum - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM floatoverlap WHERE time >= '2000-01-01 00:00:05' AND time <= '2000-01-01T00:00:10Z' GROUP BY time(10s), region`, - exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",null],["2000-01-01T00:00:10Z",30]]}]}]}`, - }, - &Query{ - name: "aggregation with a null field value - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM floatoverlap GROUP BY region`, - exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",50]]},{"name":"floatoverlap","tags":{"region":"us-west"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",100]]}]}]}`, - }, - &Query{ - name: "multiple aggregations - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value), MEAN(value) FROM floatoverlap GROUP BY region`, - exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",50,25]]},{"name":"floatoverlap","tags":{"region":"us-west"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",100,100]]}]}]}`, - }, - &Query{ - name: "multiple aggregations with division - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value) / mean(value) as div FROM floatoverlap GROUP BY region`, - exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",2]]},{"name":"floatoverlap","tags":{"region":"us-west"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, - }, - }...) 
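- // Unlike the skipped integer variant (FIXME issue #2879), the float division query runs: sum/mean evaluates to 2 for us-east (50/25) and 1 for us-west (100/100).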
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Aggregates_Load(t *testing.T) { - t.Parallel() - s := OpenDefaultServer(NewConfig(), "") - defer s.Close() - - test := NewTest("db0", "rp0") - test.write = strings.Join([]string{ - fmt.Sprintf(`load,region=us-east,host=serverA value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`load,region=us-east,host=serverB value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`load,region=us-west,host=serverC value=100.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - }, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "group by multiple dimensions", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value) FROM load GROUP BY region, host`, - exp: `{"results":[{"series":[{"name":"load","tags":{"host":"serverA","region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",20]]},{"name":"load","tags":{"host":"serverB","region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",30]]},{"name":"load","tags":{"host":"serverC","region":"us-west"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",100]]}]}]}`, - }, - &Query{ - name: "sum of value times 2", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value)*2 FROM load`, - exp: `{"results":[{"series":[{"name":"load","columns":["time",""],"values":[["1970-01-01T00:00:00Z",300]]}]}]}`, - }, - &Query{ - name: "sum of value divided by 2", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value)/2 FROM load`, - exp: `{"results":[{"series":[{"name":"load","columns":["time",""],"values":[["1970-01-01T00:00:00Z",75]]}]}]}`, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Aggregates_CPU(t *testing.T) { - t.Parallel() - s := OpenDefaultServer(NewConfig(), "") - defer s.Close() - - test := NewTest("db0", "rp0") - test.write = strings.Join([]string{ - fmt.Sprintf(`cpu,region=uk,host=serverZ,service=redis value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), - fmt.Sprintf(`cpu,region=uk,host=serverZ,service=mysql value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), - }, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "aggregation with WHERE and AND", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value) FROM cpu WHERE region='uk' AND host='serverZ'`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",50]]}]}]}`, - }, - }...) 
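- // Filtering on two tags narrows the aggregate to the matching series; both points above carry region=uk and host=serverZ, so the expected sum is 50.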
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Aggregates_String(t *testing.T) { - t.Parallel() - s := OpenDefaultServer(NewConfig(), "") - defer s.Close() - - test := NewTest("db0", "rp0") - test.write = strings.Join([]string{ - fmt.Sprintf(`stringdata value="first" %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), - fmt.Sprintf(`stringdata value="last" %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:04Z").UnixNano()), - }, "\n") - - test.addQueries([]*Query{ - // strings - &Query{ - name: "STDDEV on string data - string", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT STDDEV(value) FROM stringdata`, - exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, - }, - &Query{ - name: "MEAN on string data - string", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEAN(value) FROM stringdata`, - exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","mean"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, - }, - &Query{ - name: "MEDIAN on string data - string", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEDIAN(value) FROM stringdata`, - exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, - }, - &Query{ - name: "COUNT on string data - string", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(value) FROM stringdata`, - exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",2]]}]}]}`, - }, - &Query{ - name: "FIRST on string data - string", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT FIRST(value) FROM stringdata`, - exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","first"],"values":[["2000-01-01T00:00:03Z","first"]]}]}]}`, - }, - &Query{ - name: "LAST on string data - string", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT LAST(value) FROM stringdata`, - exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","last"],"values":[["2000-01-01T00:00:04Z","last"]]}]}]}`, - }, - }...) 
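- // Numeric aggregates degrade on string fields: STDDEV and MEDIAN yield null and MEAN yields 0, while COUNT, FIRST, and LAST still return values, per the expectations above.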
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_AggregateSelectors(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`network,host=server01,region=west,core=1 rx=10i,tx=20i,core=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`network,host=server02,region=west,core=2 rx=40i,tx=50i,core=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`network,host=server03,region=east,core=3 rx=40i,tx=55i,core=4i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - fmt.Sprintf(`network,host=server04,region=east,core=4 rx=40i,tx=60i,core=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), - fmt.Sprintf(`network,host=server05,region=west,core=1 rx=50i,tx=70i,core=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), - fmt.Sprintf(`network,host=server06,region=east,core=2 rx=50i,tx=40i,core=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), - fmt.Sprintf(`network,host=server07,region=west,core=3 rx=70i,tx=30i,core=4i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), - fmt.Sprintf(`network,host=server08,region=east,core=4 rx=90i,tx=10i,core=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), - fmt.Sprintf(`network,host=server09,region=east,core=1 rx=5i,tx=4i,core=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:20Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "baseline", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM network`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","core","host","region","rx","tx"],"values":[["2000-01-01T00:00:00Z",2,"server01","west",10,20],["2000-01-01T00:00:10Z",3,"server02","west",40,50],["2000-01-01T00:00:20Z",4,"server03","east",40,55],["2000-01-01T00:00:30Z",1,"server04","east",40,60],["2000-01-01T00:00:40Z",2,"server05","west",50,70],["2000-01-01T00:00:50Z",3,"server06","east",50,40],["2000-01-01T00:01:00Z",4,"server07","west",70,30],["2000-01-01T00:01:10Z",1,"server08","east",90,10],["2000-01-01T00:01:20Z",2,"server09","east",5,4]]}]}]}`, - }, - &Query{ - name: "max - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",90]]}]}]}`, - }, - &Query{ - name: "max - baseline 30s - epoch ms", - params: url.Values{"db": []string{"db0"}, "epoch": []string{"ms"}}, - command: `SELECT max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - 
exp: fmt.Sprintf( - `{"results":[{"series":[{"name":"network","columns":["time","max"],"values":[[%d,40],[%d,50],[%d,90]]}]}]}`, - mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()/int64(time.Millisecond), - mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()/int64(time.Millisecond), - mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()/int64(time.Millisecond), - ), - }, - &Query{ - name: "max - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT tx, max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","max"],"values":[["2000-01-01T00:00:00Z",50,40],["2000-01-01T00:00:30Z",70,50],["2000-01-01T00:01:00Z",10,90]]}]}]}`, - }, - &Query{ - name: "max - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","max"],"values":[["2000-01-01T00:00:10Z",40],["2000-01-01T00:00:40Z",50],["2000-01-01T00:01:10Z",90]]}]}]}`, - }, - &Query{ - name: "max - time and tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, tx, max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","max"],"values":[["2000-01-01T00:00:10Z",50,40],["2000-01-01T00:00:40Z",70,50],["2000-01-01T00:01:10Z",10,90]]}]}]}`, - }, - &Query{ - name: "min - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:30Z",40],["2000-01-01T00:01:00Z",5]]}]}]}`, - }, - &Query{ - name: "min - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT tx, min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","min"],"values":[["2000-01-01T00:00:00Z",20,10],["2000-01-01T00:00:30Z",60,40],["2000-01-01T00:01:00Z",4,5]]}]}]}`, - }, - &Query{ - name: "min - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:30Z",40],["2000-01-01T00:01:20Z",5]]}]}]}`, - }, - &Query{ - name: "min - time and tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, tx, min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","min"],"values":[["2000-01-01T00:00:00Z",20,10],["2000-01-01T00:00:30Z",60,40],["2000-01-01T00:01:20Z",4,5]]}]}]}`, - }, - &Query{ - name: "max,min - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT max(rx), min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: 
`{"results":[{"series":[{"name":"network","columns":["time","max","min"],"values":[["2000-01-01T00:00:00Z",40,10],["2000-01-01T00:00:30Z",50,40],["2000-01-01T00:01:00Z",90,5]]}]}]}`, - }, - &Query{ - name: "first - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT first(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:30Z",40],["2000-01-01T00:01:00Z",70]]}]}]}`, - }, - &Query{ - name: "first - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, tx, first(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","first"],"values":[["2000-01-01T00:00:00Z",20,10],["2000-01-01T00:00:30Z",60,40],["2000-01-01T00:01:00Z",30,70]]}]}]}`, - }, - &Query{ - name: "first - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, first(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:30Z",40],["2000-01-01T00:01:00Z",70]]}]}]}`, - }, - &Query{ - name: "first - time and tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, tx, first(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","first"],"values":[["2000-01-01T00:00:00Z",20,10],["2000-01-01T00:00:30Z",60,40],["2000-01-01T00:01:00Z",30,70]]}]}]}`, - }, - &Query{ - name: "last - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT last(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","last"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",5]]}]}]}`, - }, - &Query{ - name: "last - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT tx, last(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","last"],"values":[["2000-01-01T00:00:00Z",55,40],["2000-01-01T00:00:30Z",40,50],["2000-01-01T00:01:00Z",4,5]]}]}]}`, - }, - &Query{ - name: "last - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, last(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","last"],"values":[["2000-01-01T00:00:20Z",40],["2000-01-01T00:00:50Z",50],["2000-01-01T00:01:20Z",5]]}]}]}`, - }, - &Query{ - name: "last - time and tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, tx, last(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","last"],"values":[["2000-01-01T00:00:20Z",55,40],["2000-01-01T00:00:50Z",40,50],["2000-01-01T00:01:20Z",4,5]]}]}]}`, - }, - &Query{ - name: "count - baseline 30s", - params: url.Values{"db": 
[]string{"db0"}}, - command: `SELECT count(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","count"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:30Z",3],["2000-01-01T00:01:00Z",3]]}]}]}`, - }, - &Query{ - name: "count - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, count(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, - }, - &Query{ - name: "count - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT tx, count(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, - }, - &Query{ - name: "distinct - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT distinct(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","distinct"],"values":[["2000-01-01T00:00:00Z",[10,40]],["2000-01-01T00:00:30Z",[40,50]],["2000-01-01T00:01:00Z",[5,70,90]]]}]}]}`, - }, - &Query{ - name: "distinct - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, distinct(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"error":"error parsing query: aggregate function distinct() can not be combined with other functions or fields"}`, - }, - &Query{ - name: "distinct - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT tx, distinct(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"error":"error parsing query: aggregate function distinct() can not be combined with other functions or fields"}`, - }, - &Query{ - name: "mean - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT mean(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",30],["2000-01-01T00:00:30Z",46.666666666666664],["2000-01-01T00:01:00Z",55]]}]}]}`, - }, - &Query{ - name: "mean - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, mean(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, - }, - &Query{ - name: "mean - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT tx, mean(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, - }, - &Query{ - name: "median - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT median(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: 
`{"results":[{"series":[{"name":"network","columns":["time","median"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",70]]}]}]}`, - }, - &Query{ - name: "median - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, median(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, - }, - &Query{ - name: "median - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT tx, median(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, - }, - &Query{ - name: "spread - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT spread(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","spread"],"values":[["2000-01-01T00:00:00Z",30],["2000-01-01T00:00:30Z",10],["2000-01-01T00:01:00Z",85]]}]}]}`, - }, - &Query{ - name: "spread - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, spread(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, - }, - &Query{ - name: "spread - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT tx, spread(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, - }, - &Query{ - name: "stddev - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT stddev(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","stddev"],"values":[["2000-01-01T00:00:00Z",17.320508075688775],["2000-01-01T00:00:30Z",5.773502691896258],["2000-01-01T00:01:00Z",44.44097208657794]]}]}]}`, - }, - &Query{ - name: "stddev - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, stddev(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, - }, - &Query{ - name: "stddev - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT tx, stddev(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, - }, - &Query{ - name: "percentile - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT percentile(rx, 75) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"series":[{"name":"network","columns":["time","percentile"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",70]]}]}]}`, - }, - &Query{ - name: "percentile - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, percentile(rx, 
75) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, - }, - &Query{ - name: "percentile - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT tx, percentile(rx, 75) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_TopInt(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - // cpu data with overlapping duplicate values - // hour 0 - fmt.Sprintf(`cpu,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02 value=3.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`cpu,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - // hour 1 - fmt.Sprintf(`cpu,host=server04 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server05 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:10Z").UnixNano()), - fmt.Sprintf(`cpu,host=server06 value=6.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:20Z").UnixNano()), - // hour 2 - fmt.Sprintf(`cpu,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:10Z").UnixNano()), - - // memory data - // hour 0 - fmt.Sprintf(`memory,host=a,service=redis value=1000i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`memory,host=b,service=mysql value=2000i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`memory,host=b,service=redis value=1500i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - // hour 1 - fmt.Sprintf(`memory,host=a,service=redis value=1001i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), - fmt.Sprintf(`memory,host=b,service=mysql value=2001i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), - fmt.Sprintf(`memory,host=b,service=redis value=1501i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), - // hour 2 - fmt.Sprintf(`memory,host=a,service=redis value=1002i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()), - fmt.Sprintf(`memory,host=b,service=mysql value=2002i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()), - fmt.Sprintf(`memory,host=b,service=redis value=1502i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()), - } - - test := NewTest("db0", 
"rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "top - cpu", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, 1) FROM cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T02:00:10Z",9]]}]}]}`, - }, - &Query{ - name: "top - cpu - 2 values", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, 2) FROM cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, - }, - &Query{ - name: "top - cpu - 3 values - sorts on tie properly", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, 3) FROM cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:00Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, - }, - &Query{ - name: "top - cpu - with tag", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, host, 2) FROM cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top","host"],"values":[["2000-01-01T01:00:10Z",7,"server05"],["2000-01-01T02:00:10Z",9,"server08"]]}]}]}`, - }, - &Query{ - name: "top - cpu - 3 values with limit 2", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, 3) FROM cpu limit 2`, - exp: `{"error":"error parsing query: limit (3) in top function can not be larger than the LIMIT (2) in the select statement"}`, - }, - &Query{ - name: "top - cpu - hourly", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, 1) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T01:00:00Z",7],["2000-01-01T02:00:00Z",9]]}]}]}`, - }, - &Query{ - name: "top - cpu - time specified - hourly", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, TOP(value, 1) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, - }, - &Query{ - name: "top - cpu - time specified - hourly - epoch ms", - params: url.Values{"db": []string{"db0"}, "epoch": []string{"ms"}}, - command: `SELECT time, TOP(value, 1) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, - exp: fmt.Sprintf( - `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[[%d,4],[%d,7],[%d,9]]}]}]}`, - mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()/int64(time.Millisecond), - mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:10Z").UnixNano()/int64(time.Millisecond), - mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:10Z").UnixNano()/int64(time.Millisecond), - ), - }, - &Query{ - name: "top - cpu - time specified (not first) - hourly", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, 1), time FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, - }, - &Query{ - name: "top - cpu - 2 values hourly", - params: url.Values{"db": []string{"db0"}}, - 
command: `SELECT TOP(value, 2) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:00Z",3],["2000-01-01T01:00:00Z",7],["2000-01-01T01:00:00Z",6],["2000-01-01T02:00:00Z",9],["2000-01-01T02:00:00Z",7]]}]}]}`, - }, - &Query{ - name: "top - cpu - time specified - 2 values hourly", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, 2), time FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:10Z",3],["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:10Z",7],["2000-01-01T01:00:20Z",6],["2000-01-01T02:00:00Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, - }, - &Query{ - name: "top - cpu - 3 values hourly - validates that a bucket can have less than limit if no values exist in that time bucket", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, 3) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:00Z",2],["2000-01-01T01:00:00Z",7],["2000-01-01T01:00:00Z",6],["2000-01-01T01:00:00Z",5],["2000-01-01T02:00:00Z",9],["2000-01-01T02:00:00Z",7]]}]}]}`, - }, - &Query{ - name: "top - cpu - time specified - 3 values hourly - validates that a bucket can have less than limit if no values exist in that time bucket", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, 3), time FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:10Z",3],["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:00Z",5],["2000-01-01T01:00:10Z",7],["2000-01-01T01:00:20Z",6],["2000-01-01T02:00:00Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, - }, - &Query{ - name: "top - memory - 2 values, two tags", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, 2), host, service FROM memory`, - exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T01:00:00Z",2001,"b","mysql"],["2000-01-01T02:00:00Z",2002,"b","mysql"]]}]}]}`, - }, - &Query{ - name: "top - memory - host tag with limit 2", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, host, 2) FROM memory`, - exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","host"],"values":[["2000-01-01T02:00:00Z",2002,"b"],["2000-01-01T02:00:00Z",1002,"a"]]}]}]}`, - }, - &Query{ - name: "top - memory - host tag with limit 2, service tag in select", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, host, 2), service FROM memory`, - exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1002,"a","redis"]]}]}]}`, - }, - &Query{ - name: "top - memory - service tag with limit 2, host tag in select", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, service, 2), host FROM memory`, - exp: 
`{"results":[{"series":[{"name":"memory","columns":["time","top","service","host"],"values":[["2000-01-01T02:00:00Z",2002,"mysql","b"],["2000-01-01T02:00:00Z",1502,"redis","b"]]}]}]}`, - }, - &Query{ - name: "top - memory - host and service tag with limit 2", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, host, service, 2) FROM memory`, - exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1502,"b","redis"]]}]}]}`, - }, - &Query{ - name: "top - memory - host tag with limit 2 with service tag in select", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, host, 2), service FROM memory`, - exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1002,"a","redis"]]}]}]}`, - }, - &Query{ - name: "top - memory - host and service tag with limit 3", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, host, service, 3) FROM memory`, - exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1502,"b","redis"],["2000-01-01T02:00:00Z",1002,"a","redis"]]}]}]}`, - }, - - // TODO - // - Test that specifiying fields or tags in the function will rewrite the query to expand them to the fields - // - Test that a field can be used in the top function - // - Test that asking for a field will come back before a tag if they have the same name for a tag and a field - // - Test that `select top(value, host, 2)` when there is only one value for `host` it will only bring back one value - // - Test that `select top(value, host, 4) from foo where time > now() - 1d and time < now() group by time(1h)` and host is unique in some time buckets that it returns only the unique ones, and not always 4 values - - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP: %s", query.name) - continue - } - - println(">>>>", query.name) - if query.name != `top - memory - host tag with limit 2` { // FIXME: temporary - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Test various aggregates when different series only have data for the same timestamp. 
-func TestServer_Query_Aggregates_IdenticalTime(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`series,host=a value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`series,host=b value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`series,host=c value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`series,host=d value=4 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`series,host=e value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`series,host=f value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`series,host=g value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`series,host=h value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`series,host=i value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "last from multiple series with identical timestamp", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT last(value) FROM "series"`, - exp: `{"results":[{"series":[{"name":"series","columns":["time","last"],"values":[["2000-01-01T00:00:00Z",5]]}]}]}`, - repeat: 100, - }, - &Query{ - name: "first from multiple series with identical timestamp", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT first(value) FROM "series"`, - exp: `{"results":[{"series":[{"name":"series","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",5]]}]}]}`, - repeat: 100, - }, - }...) 
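The `repeat` field above exists to flush out nondeterminism: nine series share a single timestamp, so `first()` and `last()` must break the tie the same way on every execution. A minimal sketch of the idea behind the run loop that follows (the helper name `runRepeated` is illustrative, not part of this harness; assumes `fmt` is imported):

	// runRepeated executes fn repeat+1 times (n runs 0..repeat inclusive,
	// mirroring the loop below) and reports the first failure, so a single
	// inconsistent ordering across runs is enough to fail the test.
	func runRepeated(repeat int, fn func() error) error {
		for n := 0; n <= repeat; n++ {
			if err := fn(); err != nil {
				return fmt.Errorf("iteration %d: %v", n, err)
			}
		}
		return nil
	}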
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - for n := 0; n <= query.repeat; n++ { - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } - } -} - -// This tests that a GROUP BY time query observes the time range you asked for, -// but puts values only into the buckets that fall within that range -func TestServer_Query_GroupByTimeCutoffs(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu value=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`cpu value=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), - fmt.Sprintf(`cpu value=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:05Z").UnixNano()), - fmt.Sprintf(`cpu value=4i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:08Z").UnixNano()), - fmt.Sprintf(`cpu value=5i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:09Z").UnixNano()), - fmt.Sprintf(`cpu value=6i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - } - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "sum all time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",21]]}]}]}`, - }, - &Query{ - name: "sum all time grouped by time 5s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T00:00:10Z' group by time(5s)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:05Z",12],["2000-01-01T00:00:10Z",6]]}]}]}`, - }, - &Query{ - name: "sum all time grouped by time 5s missing first point", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:01Z' and time <= '2000-01-01T00:00:10Z' group by time(5s)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:05Z",12],["2000-01-01T00:00:10Z",6]]}]}]}`, - }, - &Query{ - name: "sum all time grouped by time 5s missing first points (null for bucket)", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:02Z' and time <= '2000-01-01T00:00:10Z' group by time(5s)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",null],["2000-01-01T00:00:05Z",12],["2000-01-01T00:00:10Z",6]]}]}]}`, - }, - &Query{ - name: "sum all time grouped by time 5s missing last point - 2 time intervals", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T00:00:09Z' group by time(5s)`, - exp:
`{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:05Z",12]]}]}]}`, - }, - &Query{ - name: "sum all time grouped by time 5s missing last 2 points - 2 time intervals", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T00:00:08Z' group by time(5s)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:05Z",7]]}]}]}`, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Write_Precision(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []struct { - write string - params url.Values - }{ - { - write: fmt.Sprintf("cpu_n0_precision value=1 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").UnixNano()), - }, - { - write: fmt.Sprintf("cpu_n1_precision value=1.1 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").UnixNano()), - params: url.Values{"precision": []string{"n"}}, - }, - { - write: fmt.Sprintf("cpu_u_precision value=100 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Microsecond).UnixNano()/int64(time.Microsecond)), - params: url.Values{"precision": []string{"u"}}, - }, - { - write: fmt.Sprintf("cpu_ms_precision value=200 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Millisecond).UnixNano()/int64(time.Millisecond)), - params: url.Values{"precision": []string{"ms"}}, - }, - { - write: fmt.Sprintf("cpu_s_precision value=300 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Second).UnixNano()/int64(time.Second)), - params: url.Values{"precision": []string{"s"}}, - }, - { - write: fmt.Sprintf("cpu_m_precision value=400 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Minute).UnixNano()/int64(time.Minute)), - params: url.Values{"precision": []string{"m"}}, - }, - { - write: fmt.Sprintf("cpu_h_precision value=500 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Hour).UnixNano()/int64(time.Hour)), - params: url.Values{"precision": []string{"h"}}, - }, - } - - test := NewTest("db0", "rp0") - - test.addQueries([]*Query{ - &Query{ - name: "point with nanosecond precision time - no precision specified on write", - command: `SELECT * FROM cpu_n0_precision`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"cpu_n0_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789012345Z",1]]}]}]}`, - }, - &Query{ - name: "point with nanosecond precision time", - command: `SELECT * FROM cpu_n1_precision`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"cpu_n1_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789012345Z",1.1]]}]}]}`, - }, - &Query{ - name: "point 
with microsecond precision time", - command: `SELECT * FROM cpu_u_precision`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"cpu_u_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789012Z",100]]}]}]}`, - }, - &Query{ - name: "point with millisecond precision time", - command: `SELECT * FROM cpu_ms_precision`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"cpu_ms_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789Z",200]]}]}]}`, - }, - &Query{ - name: "point with second precision time", - command: `SELECT * FROM cpu_s_precision`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"cpu_s_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56Z",300]]}]}]}`, - }, - &Query{ - name: "point with minute precision time", - command: `SELECT * FROM cpu_m_precision`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"cpu_m_precision","columns":["time","value"],"values":[["2000-01-01T12:34:00Z",400]]}]}]}`, - }, - &Query{ - name: "point with hour precision time", - command: `SELECT * FROM cpu_h_precision`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"cpu_h_precision","columns":["time","value"],"values":[["2000-01-01T12:00:00Z",500]]}]}]}`, - }, - }...) - - // we are doing writes that require parameter changes, so we are fighting the test harness a little to make this happen properly - for _, w := range writes { - test.write = w.write - test.params = w.params - test.initialized = false - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Wildcards(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`wildcard,region=us-east value=10 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`wildcard,region=us-east valx=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`wildcard,region=us-east value=30,valx=40 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - - fmt.Sprintf(`wgroup,region=us-east value=10.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`wgroup,region=us-east value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`wgroup,region=us-west value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - - fmt.Sprintf(`m1,region=us-east value=10.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`m2,host=server01 field=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "wildcard", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM wildcard`, - exp: 
`{"results":[{"series":[{"name":"wildcard","columns":["time","region","value","valx"],"values":[["2000-01-01T00:00:00Z","us-east",10,null],["2000-01-01T00:00:10Z","us-east",null,20],["2000-01-01T00:00:20Z","us-east",30,40]]}]}]}`, - }, - &Query{ - name: "wildcard with group by", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM wildcard GROUP BY *`, - exp: `{"results":[{"series":[{"name":"wildcard","tags":{"region":"us-east"},"columns":["time","value","valx"],"values":[["2000-01-01T00:00:00Z",10,null],["2000-01-01T00:00:10Z",null,20],["2000-01-01T00:00:20Z",30,40]]}]}]}`, - }, - &Query{ - name: "GROUP BY queries", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT mean(value) FROM wgroup GROUP BY *`, - exp: `{"results":[{"series":[{"name":"wgroup","tags":{"region":"us-east"},"columns":["time","mean"],"values":[["1970-01-01T00:00:00Z",15]]},{"name":"wgroup","tags":{"region":"us-west"},"columns":["time","mean"],"values":[["1970-01-01T00:00:00Z",30]]}]}]}`, - }, - &Query{ - name: "GROUP BY queries with time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT mean(value) FROM wgroup WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:01:00Z' GROUP BY *,TIME(1m)`, - exp: `{"results":[{"series":[{"name":"wgroup","tags":{"region":"us-east"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",15]]},{"name":"wgroup","tags":{"region":"us-west"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",30]]}]}]}`, - }, - &Query{ - name: "wildcard and field in select", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT value, * FROM wildcard`, - exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","region","value","valx"],"values":[["2000-01-01T00:00:00Z","us-east",10,null],["2000-01-01T00:00:10Z","us-east",null,20],["2000-01-01T00:00:20Z","us-east",30,40]]}]}]}`, - }, - &Query{ - name: "field and wildcard in select", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT value, * FROM wildcard`, - exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","region","value","valx"],"values":[["2000-01-01T00:00:00Z","us-east",10,null],["2000-01-01T00:00:10Z","us-east",null,20],["2000-01-01T00:00:20Z","us-east",30,40]]}]}]}`, - }, - &Query{ - name: "field and wildcard in group by", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM wildcard GROUP BY region, *`, - exp: `{"results":[{"series":[{"name":"wildcard","tags":{"region":"us-east"},"columns":["time","value","valx"],"values":[["2000-01-01T00:00:00Z",10,null],["2000-01-01T00:00:10Z",null,20],["2000-01-01T00:00:20Z",30,40]]}]}]}`, - }, - &Query{ - name: "wildcard and field in group by", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM wildcard GROUP BY *, region`, - exp: `{"results":[{"series":[{"name":"wildcard","tags":{"region":"us-east"},"columns":["time","value","valx"],"values":[["2000-01-01T00:00:00Z",10,null],["2000-01-01T00:00:10Z",null,20],["2000-01-01T00:00:20Z",30,40]]}]}]}`, - }, - &Query{ - name: "wildcard with multiple measurements", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM m1, m2`, - exp: `{"results":[{"series":[{"name":"m1","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:00Z",null,null,"us-east",10]]},{"name":"m2","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:01Z",20,"server01",null,null]]}]}]}`, - }, - &Query{ - name: "wildcard with multiple measurements via regex", - params: 
url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM /^m.*/`, - exp: `{"results":[{"series":[{"name":"m1","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:00Z",null,null,"us-east",10]]},{"name":"m2","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:01Z",20,"server01",null,null]]}]}]}`, - }, - &Query{ - name: "wildcard with multiple measurements via regex and limit", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM db0../^m.*/ LIMIT 2`, - exp: `{"results":[{"series":[{"name":"m1","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:00Z",null,null,"us-east",10]]},{"name":"m2","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:01Z",20,"server01",null,null]]}]}]}`, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_WildcardExpansion(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`wildcard,region=us-east,host=A value=10,cpu=80 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`wildcard,region=us-east,host=B value=20,cpu=90 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`wildcard,region=us-west,host=B value=30,cpu=70 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - fmt.Sprintf(`wildcard,region=us-east,host=A value=40,cpu=60 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), - - fmt.Sprintf(`dupnames,region=us-east,day=1 value=10,day=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`dupnames,region=us-east,day=2 value=20,day=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`dupnames,region=us-west,day=3 value=30,day=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "wildcard", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM wildcard`, - exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","cpu","host","region","value"],"values":[["2000-01-01T00:00:00Z",80,"A","us-east",10],["2000-01-01T00:00:10Z",90,"B","us-east",20],["2000-01-01T00:00:20Z",70,"B","us-west",30],["2000-01-01T00:00:30Z",60,"A","us-east",40]]}]}]}`, - }, - &Query{ - name: "no wildcard in select", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT cpu, host, region, value FROM wildcard`, - exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","cpu","host","region","value"],"values":[["2000-01-01T00:00:00Z",80,"A","us-east",10],["2000-01-01T00:00:10Z",90,"B","us-east",20],["2000-01-01T00:00:20Z",70,"B","us-west",30],["2000-01-01T00:00:30Z",60,"A","us-east",40]]}]}]}`, - }, - &Query{ - name: "no wildcard in select, preserve column order", - 
params: url.Values{"db": []string{"db0"}}, - command: `SELECT host, cpu, region, value FROM wildcard`, - exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","host","cpu","region","value"],"values":[["2000-01-01T00:00:00Z","A",80,"us-east",10],["2000-01-01T00:00:10Z","B",90,"us-east",20],["2000-01-01T00:00:20Z","B",70,"us-west",30],["2000-01-01T00:00:30Z","A",60,"us-east",40]]}]}]}`, - }, - - &Query{ - name: "only tags, no fields", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT host, region FROM wildcard`, - exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, - }, - - &Query{ - name: "no wildcard with alias", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT cpu as c, host as h, region, value FROM wildcard`, - exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","c","h","region","value"],"values":[["2000-01-01T00:00:00Z",80,"A","us-east",10],["2000-01-01T00:00:10Z",90,"B","us-east",20],["2000-01-01T00:00:20Z",70,"B","us-west",30],["2000-01-01T00:00:30Z",60,"A","us-east",40]]}]}]}`, - }, - &Query{ - name: "duplicate tag and field key, always favor field over tag", - command: `SELECT * FROM dupnames`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"dupnames","columns":["time","day","region","value"],"values":[["2000-01-01T00:00:00Z",3,"us-east",10],["2000-01-01T00:00:10Z",2,"us-east",20],["2000-01-01T00:00:20Z",1,"us-west",30]]}]}]}`, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_AcrossShardsAndFields(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu load=100 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`cpu load=200 %d`, mustParseTime(time.RFC3339Nano, "2010-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`cpu core=4 %d`, mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "two results for cpu", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT load FROM cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2000-01-01T00:00:00Z",100],["2010-01-01T00:00:00Z",200]]}]}]}`, - }, - &Query{ - name: "two results for cpu, multi-select", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT core,load FROM cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core","load"],"values":[["2000-01-01T00:00:00Z",null,100],["2010-01-01T00:00:00Z",null,200],["2015-01-01T00:00:00Z",4,null]]}]}]}`, - }, - &Query{ - name: "two results for cpu, wildcard select", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM cpu`, - exp: 
`{"results":[{"series":[{"name":"cpu","columns":["time","core","load"],"values":[["2000-01-01T00:00:00Z",null,100],["2010-01-01T00:00:00Z",null,200],["2015-01-01T00:00:00Z",4,null]]}]}]}`, - }, - &Query{ - name: "one result for core", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT core FROM cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2015-01-01T00:00:00Z",4]]}]}]}`, - }, - &Query{ - name: "empty result set from non-existent field", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT foo FROM cpu`, - exp: `{"results":[{}]}`, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Where_Fields(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu alert_id="alert",tenant_id="tenant",_cust="johnson brothers" %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - fmt.Sprintf(`cpu alert_id="alert",tenant_id="tenant",_cust="johnson brothers" %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - - fmt.Sprintf(`cpu load=100.0,core=4 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), - fmt.Sprintf(`cpu load=80.0,core=2 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:01:02Z").UnixNano()), - - fmt.Sprintf(`clicks local=true %d`, mustParseTime(time.RFC3339Nano, "2014-11-10T23:00:01Z").UnixNano()), - fmt.Sprintf(`clicks local=false %d`, mustParseTime(time.RFC3339Nano, "2014-11-10T23:00:02Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - // non type specific - &Query{ - name: "missing measurement with group by", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT load from missing group by *`, - exp: `{"results":[{}]}`, - }, - - // string - &Query{ - name: "single string field", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT alert_id FROM cpu WHERE alert_id='alert'`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","alert_id"],"values":[["2015-02-28T01:03:36.703820946Z","alert"]]}]}]}`, - }, - &Query{ - name: "string AND query, all fields in SELECT", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT alert_id,tenant_id,_cust FROM cpu WHERE alert_id='alert' AND tenant_id='tenant'`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","alert_id","tenant_id","_cust"],"values":[["2015-02-28T01:03:36.703820946Z","alert","tenant","johnson brothers"]]}]}]}`, - }, - &Query{ - name: "string AND query, all fields in SELECT, one in parenthesis", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT alert_id,tenant_id FROM cpu WHERE alert_id='alert' AND (tenant_id='tenant')`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","alert_id","tenant_id"],"values":[["2015-02-28T01:03:36.703820946Z","alert","tenant"]]}]}]}`, - }, - &Query{ - name: "string underscored field", - 
params: url.Values{"db": []string{"db0"}}, - command: `SELECT alert_id FROM cpu WHERE _cust='johnson brothers'`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","alert_id"],"values":[["2015-02-28T01:03:36.703820946Z","alert"]]}]}]}`, - }, - &Query{ - name: "string no match", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT alert_id FROM cpu WHERE _cust='acme'`, - exp: `{"results":[{}]}`, - }, - - // float64 - &Query{ - name: "float64 GT no match", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load > 100`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "float64 GTE match one", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load >= 100`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100]]}]}]}`, - }, - &Query{ - name: "float64 EQ match upper bound", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load = 100`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100]]}]}]}`, - }, - &Query{ - name: "float64 LTE match two", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load <= 100`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100],["2009-11-10T23:01:02Z",80]]}]}]}`, - }, - &Query{ - name: "float64 GT match one", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load > 99`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100]]}]}]}`, - }, - &Query{ - name: "float64 EQ no match", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load = 99`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "float64 LT match one", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load < 99`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:01:02Z",80]]}]}]}`, - }, - &Query{ - name: "float64 LT no match", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load < 80`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "float64 NE match one", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load != 100`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:01:02Z",80]]}]}]}`, - }, - - // int64 - &Query{ - name: "int64 GT no match", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core > 4`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "int64 GTE match one", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core >= 4`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4]]}]}]}`, - }, - &Query{ - name: "int64 EQ match upper bound", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core = 4`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4]]}]}]}`, - }, - &Query{ - name: "int64 LTE match two ", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core <= 4`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4],["2009-11-10T23:01:02Z",2]]}]}]}`, - }, - &Query{ 
- name: "int64 GT match one", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core > 3`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4]]}]}]}`, - }, - &Query{ - name: "int64 EQ no match", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core = 3`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "int64 LT match one", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core < 3`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:01:02Z",2]]}]}]}`, - }, - &Query{ - name: "int64 LT no match", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core < 2`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "int64 NE match one", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core != 4`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:01:02Z",2]]}]}]}`, - }, - - // bool - &Query{ - name: "bool EQ match true", - params: url.Values{"db": []string{"db0"}}, - command: `select local from clicks where local = true`, - exp: `{"results":[{"series":[{"name":"clicks","columns":["time","local"],"values":[["2014-11-10T23:00:01Z",true]]}]}]}`, - }, - &Query{ - name: "bool EQ match false", - params: url.Values{"db": []string{"db0"}}, - command: `select local from clicks where local = false`, - exp: `{"results":[{"series":[{"name":"clicks","columns":["time","local"],"values":[["2014-11-10T23:00:02Z",false]]}]}]}`, - }, - - &Query{ - name: "bool NE match one", - params: url.Values{"db": []string{"db0"}}, - command: `select local from clicks where local != true`, - exp: `{"results":[{"series":[{"name":"clicks","columns":["time","local"],"values":[["2014-11-10T23:00:02Z",false]]}]}]}`, - }, - }...) 
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Where_With_Tags(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`where_events,tennant=paul foo="bar" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), - fmt.Sprintf(`where_events,tennant=paul foo="baz" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()), - fmt.Sprintf(`where_events,tennant=paul foo="bat" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:04Z").UnixNano()), - fmt.Sprintf(`where_events,tennant=todd foo="bar" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:05Z").UnixNano()), - fmt.Sprintf(`where_events,tennant=david foo="bap" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:06Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "tag field and time", - params: url.Values{"db": []string{"db0"}}, - command: `select foo from where_events where (tennant = 'paul' OR tennant = 'david') AND time > 1s AND (foo = 'bar' OR foo = 'baz' OR foo = 'bap')`, - exp: `{"results":[{"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:03Z","baz"],["2009-11-10T23:00:06Z","bap"]]}]}]}`, - }, - &Query{ - name: "where on tag that should be double quoted but isn't", - params: url.Values{"db": []string{"db0"}}, - command: `show series where data-center = 'foo'`, - exp: `{"results":[{"error":"invalid expression: data - center = 'foo'"}]}`, - }, - }...) 
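The last expectation above documents a parser subtlety: an unquoted tag key containing `-` tokenizes as subtraction, which is exactly what the returned error (`invalid expression: data - center = 'foo'`) shows. Double-quoting the identifier is the fix; a small sketch of the two forms as query strings:

	// Unquoted: the parser reads this as the arithmetic expression
	// data - center, which is an invalid WHERE expression.
	bad := `show series where data-center = 'foo'`

	// Double-quoted: treated as a single tag key, as intended.
	good := `show series where "data-center" = 'foo'`
	_, _ = bad, good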
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_LimitAndOffset(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`limited,tennant=paul foo=2 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), - fmt.Sprintf(`limited,tennant=paul foo=3 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()), - fmt.Sprintf(`limited,tennant=paul foo=4 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:04Z").UnixNano()), - fmt.Sprintf(`limited,tennant=todd foo=5 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:05Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "limit on points", - params: url.Values{"db": []string{"db0"}}, - command: `select foo from "limited" LIMIT 2`, - exp: `{"results":[{"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3]]}]}]}`, - }, - &Query{ - name: "limit higher than the number of data points", - params: url.Values{"db": []string{"db0"}}, - command: `select foo from "limited" LIMIT 20`, - exp: `{"results":[{"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4],["2009-11-10T23:00:05Z",5]]}]}]}`, - }, - &Query{ - name: "limit and offset", - params: url.Values{"db": []string{"db0"}}, - command: `select foo from "limited" LIMIT 2 OFFSET 1`, - exp: `{"results":[{"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4]]}]}]}`, - }, - &Query{ - name: "limit + offset equal to total number of points", - params: url.Values{"db": []string{"db0"}}, - command: `select foo from "limited" LIMIT 3 OFFSET 3`, - exp: `{"results":[{"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:05Z",5]]}]}]}`, - }, - &Query{ - name: "limit - offset higher than number of points", - command: `select foo from "limited" LIMIT 2 OFFSET 20`, - exp: `{"results":[{"series":[{"name":"limited","columns":["time","foo"]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "limit on points with group by time", - command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 2`, - exp: `{"results":[{"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "limit higher than the number of data points with group by time", - command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 20`, - exp: 
`{"results":[{"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4],["2009-11-10T23:00:05Z",5]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "limit and offset with group by time", - command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 2 OFFSET 1`, - exp: `{"results":[{"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "limit + offset equal to the number of points with group by time", - command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 3 OFFSET 3`, - exp: `{"results":[{"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:05Z",5]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "limit - offset higher than number of points with group by time", - command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 2 OFFSET 20`, - exp: `{"results":[{}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "limit higher than the number of data points should error", - command: `select mean(foo) from "limited" where time > '2000-01-01T00:00:00Z' group by time(1s), * fill(0) limit 2147483647`, - exp: `{"results":[{"error":"too many points in the group by interval. maybe you forgot to specify a where time clause?"}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "limit1 higher than MaxGroupBy but the number of data points is less than MaxGroupBy", - command: `select mean(foo) from "limited" where time >= '2009-11-10T23:00:02Z' and time < '2009-11-10T23:00:03Z' group by time(1s), * fill(0) limit 2147483647`, - exp: `{"results":[{"series":[{"name":"limited","tags":{"tennant":"paul"},"columns":["time","mean"],"values":[["2009-11-10T23:00:02Z",2]]},{"name":"limited","tags":{"tennant":"todd"},"columns":["time","mean"],"values":[["2009-11-10T23:00:02Z",0]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) 
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Fill(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`fills val=3 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), - fmt.Sprintf(`fills val=5 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()), - fmt.Sprintf(`fills val=4 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:06Z").UnixNano()), - fmt.Sprintf(`fills val=10 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:16Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "fill with value", - command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) FILL(1)`, - exp: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",1],["2009-11-10T23:00:15Z",10]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "fill with value, WHERE all values match condition", - command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' and val < 50 group by time(5s) FILL(1)`, - exp: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",1],["2009-11-10T23:00:15Z",10]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "fill with value, WHERE no values match condition", - command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' and val > 50 group by time(5s) FILL(1)`, - exp: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",1],["2009-11-10T23:00:05Z",1],["2009-11-10T23:00:10Z",1],["2009-11-10T23:00:15Z",1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "fill with previous", - command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) FILL(previous)`, - exp: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",4],["2009-11-10T23:00:15Z",10]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "fill with none, i.e. 
clear out nulls", - command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) FILL(none)`, - exp: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:15Z",10]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "fill defaults to null", - command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s)`, - exp: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",null],["2009-11-10T23:00:15Z",10]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "fill with count aggregate defaults to null", - command: `select count(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s)`, - exp: `{"results":[{"series":[{"name":"fills","columns":["time","count"],"values":[["2009-11-10T23:00:00Z",2],["2009-11-10T23:00:05Z",1],["2009-11-10T23:00:10Z",null],["2009-11-10T23:00:15Z",1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "fill with count aggregate defaults to null, no values match", - command: `select count(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' and val > 100 group by time(5s)`, - exp: `{"results":[{"series":[{"name":"fills","columns":["time","count"],"values":[["2009-11-10T23:00:00Z",null],["2009-11-10T23:00:05Z",null],["2009-11-10T23:00:10Z",null],["2009-11-10T23:00:15Z",null]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "fill with count aggregate specific value", - command: `select count(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) fill(1234)`, - exp: `{"results":[{"series":[{"name":"fills","columns":["time","count"],"values":[["2009-11-10T23:00:00Z",2],["2009-11-10T23:00:05Z",1],["2009-11-10T23:00:10Z",1234],["2009-11-10T23:00:15Z",1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Chunk(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := make([]string, 10001) // 10,000 is the default chunking size, even when no chunking requested. 
- expectedValues := make([]string, len(writes)) - for i := 0; i < len(writes); i++ { - writes[i] = fmt.Sprintf(`cpu value=%d %d`, i, time.Unix(0, int64(i)).UnixNano()) - expectedValues[i] = fmt.Sprintf(`["%s",%d]`, time.Unix(0, int64(i)).UTC().Format(time.RFC3339Nano), i) - } - expected := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[%s]}]}]}`, strings.Join(expectedValues, ",")) - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "SELECT all values, no chunking", - command: `SELECT value FROM cpu`, - exp: expected, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } - -} - -func TestServer_Query_DropAndRecreateMeasurement(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`memory,host=serverB,region=uswest val=33.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "Drop Measurement, series tags preserved tests", - command: `SHOW MEASUREMENTS`, - exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["memory"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "show series", - command: `SHOW SERIES`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=serverA,region=uswest","serverA","uswest"]]},{"name":"memory","columns":["_key","host","region"],"values":[["memory,host=serverB,region=uswest","serverB","uswest"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "ensure we can query for memory with both tags", - command: `SELECT * FROM memory where region='uswest' and host='serverB' GROUP BY *`, - exp: `{"results":[{"series":[{"name":"memory","tags":{"host":"serverB","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:01Z",33.2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "drop measurement cpu", - command: `DROP MEASUREMENT cpu`, - exp: `{"results":[{}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "verify measurements", - command: `SHOW MEASUREMENTS`, - exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["memory"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "verify series", - command: `SHOW SERIES`, - exp: `{"results":[{"series":[{"name":"memory","columns":["_key","host","region"],"values":[["memory,host=serverB,region=uswest","serverB","uswest"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "verify cpu measurement is gone", - command: `SELECT * FROM cpu`, - exp: 
`{"results":[{}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "verify selecting from a tag 'host' still works", - command: `SELECT * FROM memory where host='serverB' GROUP BY *`, - exp: `{"results":[{"series":[{"name":"memory","tags":{"host":"serverB","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:01Z",33.2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "verify selecting from a tag 'region' still works", - command: `SELECT * FROM memory where region='uswest' GROUP BY *`, - exp: `{"results":[{"series":[{"name":"memory","tags":{"host":"serverB","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:01Z",33.2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "verify selecting from a tag 'host' and 'region' still works", - command: `SELECT * FROM memory where region='uswest' and host='serverB' GROUP BY *`, - exp: `{"results":[{"series":[{"name":"memory","tags":{"host":"serverB","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:01Z",33.2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "Drop non-existant measurement", - command: `DROP MEASUREMENT doesntexist`, - exp: `{"results":[{"error":"measurement not found: doesntexist"}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - // Test that re-inserting the measurement works fine. - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } - - test = NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "verify measurements after recreation", - command: `SHOW MEASUREMENTS`, - exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["memory"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "verify cpu measurement has been re-inserted", - command: `SELECT * FROM cpu GROUP BY *`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"serverA","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:00Z",23.2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) 
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_ShowSeries(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:01Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:04Z").UnixNano()), - fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:05Z").UnixNano()), - fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:06Z").UnixNano()), - fmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:07Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: `show series`, - command: "SHOW SERIES", - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01","server01",""],["cpu,host=server01,region=uswest","server01","uswest"],["cpu,host=server01,region=useast","server01","useast"],["cpu,host=server02,region=useast","server02","useast"]]},{"name":"disk","columns":["_key","host","region"],"values":[["disk,host=server03,region=caeast","server03","caeast"]]},{"name":"gpu","columns":["_key","host","region"],"values":[["gpu,host=server02,region=useast","server02","useast"],["gpu,host=server03,region=caeast","server03","caeast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show series from measurement`, - command: "SHOW SERIES FROM cpu", - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01","server01",""],["cpu,host=server01,region=uswest","server01","uswest"],["cpu,host=server01,region=useast","server01","useast"],["cpu,host=server02,region=useast","server02","useast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show series from regular expression`, - command: "SHOW SERIES FROM /[cg]pu/", - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01","server01",""],["cpu,host=server01,region=uswest","server01","uswest"],["cpu,host=server01,region=useast","server01","useast"],["cpu,host=server02,region=useast","server02","useast"]]},{"name":"gpu","columns":["_key","host","region"],"values":[["gpu,host=server02,region=useast","server02","useast"],["gpu,host=server03,region=caeast","server03","caeast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show series with where tag`, - 
command: "SHOW SERIES WHERE region = 'uswest'", - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01,region=uswest","server01","uswest"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show series where tag matches regular expression`, - command: "SHOW SERIES WHERE region =~ /ca.*/", - exp: `{"results":[{"series":[{"name":"disk","columns":["_key","host","region"],"values":[["disk,host=server03,region=caeast","server03","caeast"]]},{"name":"gpu","columns":["_key","host","region"],"values":[["gpu,host=server03,region=caeast","server03","caeast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show series`, - command: "SHOW SERIES WHERE host !~ /server0[12]/", - exp: `{"results":[{"series":[{"name":"disk","columns":["_key","host","region"],"values":[["disk,host=server03,region=caeast","server03","caeast"]]},{"name":"gpu","columns":["_key","host","region"],"values":[["gpu,host=server03,region=caeast","server03","caeast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show series with from and where`, - command: "SHOW SERIES FROM cpu WHERE region = 'useast'", - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01,region=useast","server01","useast"],["cpu,host=server02,region=useast","server02","useast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show series with WHERE time should fail`, - command: "SHOW SERIES WHERE time > now() - 1h", - exp: `{"results":[{"error":"SHOW SERIES doesn't support time in WHERE clause"}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show series with WHERE field should fail`, - command: "SHOW SERIES WHERE value > 10.0", - exp: `{"results":[{"error":"SHOW SERIES doesn't support fields in WHERE clause"}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) 
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_ShowMeasurements(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server02,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`other,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: `show measurements with limit 2`, - command: "SHOW MEASUREMENTS LIMIT 2", - exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["gpu"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show measurements using WITH`, - command: "SHOW MEASUREMENTS WITH MEASUREMENT = cpu", - exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show measurements using WITH and regex`, - command: "SHOW MEASUREMENTS WITH MEASUREMENT =~ /[cg]pu/", - exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["gpu"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show measurements using WITH and regex - no matches`, - command: "SHOW MEASUREMENTS WITH MEASUREMENT =~ /.*zzzzz.*/", - exp: `{"results":[{}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show measurements where tag matches regular expression`, - command: "SHOW MEASUREMENTS WHERE region =~ /ca.*/", - exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["gpu"],["other"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show measurements where tag does not match a regular expression`, - command: "SHOW MEASUREMENTS WHERE region !~ /ca.*/", - exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show measurements with time in WHERE clauses errors`, - command: `SHOW MEASUREMENTS WHERE time > now() - 1h`, - exp: `{"results":[{"error":"SHOW MEASUREMENTS doesn't support time in WHERE clause"}]}`, - params: 
url.Values{"db": []string{"db0"}}, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_ShowTagKeys(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: `show tag keys`, - command: "SHOW TAG KEYS", - exp: `{"results":[{"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"disk","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"gpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "show tag keys from", - command: "SHOW TAG KEYS FROM cpu", - exp: `{"results":[{"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "show tag keys from regex", - command: "SHOW TAG KEYS FROM /[cg]pu/", - exp: `{"results":[{"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"gpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "show tag keys measurement not found", - command: "SHOW TAG KEYS FROM doesntexist", - exp: `{"results":[{}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "show tag keys with time in WHERE clause errors", - command: "SHOW TAG KEYS FROM cpu WHERE time > now() - 1h", - exp: `{"results":[{"error":"SHOW TAG KEYS doesn't support time in WHERE clause"}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "show tag values with key", - command: "SHOW TAG VALUES WITH KEY = host", - exp: `{"results":[{"series":[{"name":"hostTagValues","columns":["host"],"values":[["server01"],["server02"],["server03"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show tag values with key and where`, - command: `SHOW TAG VALUES FROM cpu WITH KEY = host WHERE region = 'uswest'`, - exp: 
`{"results":[{"series":[{"name":"hostTagValues","columns":["host"],"values":[["server01"]]}]}]}`,
- params: url.Values{"db": []string{"db0"}},
- },
- &Query{
- name: `show tag values with key and where matches regular expression`,
- command: `SHOW TAG VALUES WITH KEY = host WHERE region =~ /ca.*/`,
- exp: `{"results":[{"series":[{"name":"hostTagValues","columns":["host"],"values":[["server03"]]}]}]}`,
- params: url.Values{"db": []string{"db0"}},
- },
- &Query{
- name: `show tag values with key and where does not match regular expression`,
- command: `SHOW TAG VALUES WITH KEY = region WHERE host !~ /server0[12]/`,
- exp: `{"results":[{"series":[{"name":"regionTagValues","columns":["region"],"values":[["caeast"]]}]}]}`,
- params: url.Values{"db": []string{"db0"}},
- },
- &Query{
- name: `show tag values with key in and where`,
- command: `SHOW TAG VALUES FROM cpu WITH KEY IN (host, region) WHERE region = 'uswest'`,
- exp: `{"results":[{"series":[{"name":"hostTagValues","columns":["host"],"values":[["server01"]]},{"name":"regionTagValues","columns":["region"],"values":[["uswest"]]}]}]}`,
- params: url.Values{"db": []string{"db0"}},
- },
- &Query{
- name: `show tag values with key and measurement matches regular expression`,
- command: `SHOW TAG VALUES FROM /[cg]pu/ WITH KEY = host`,
- exp: `{"results":[{"series":[{"name":"hostTagValues","columns":["host"],"values":[["server01"],["server02"],["server03"]]}]}]}`,
- params: url.Values{"db": []string{"db0"}},
- },
- &Query{
- name: `show tag values with key and time in WHERE clause should error`,
- command: `SHOW TAG VALUES WITH KEY = host WHERE time > now() - 1h`,
- exp: `{"results":[{"error":"SHOW TAG VALUES doesn't support time in WHERE clause"}]}`,
- params: url.Values{"db": []string{"db0"}},
- },
- }...)
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_ShowFieldKeys(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu,host=server01 field1=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=uswest field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=useast field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=useast field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server01,region=useast field4=200,field5=300 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server03,region=caeast field6=200,field7=300 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`disk,host=server03,region=caeast field8=200,field9=300 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: `show field keys`, - command: `SHOW FIELD KEYS`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["fieldKey"],"values":[["field1"],["field2"],["field3"]]},{"name":"disk","columns":["fieldKey"],"values":[["field8"],["field9"]]},{"name":"gpu","columns":["fieldKey"],"values":[["field4"],["field5"],["field6"],["field7"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show field keys from measurement`, - command: `SHOW FIELD KEYS FROM cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["fieldKey"],"values":[["field1"],["field2"],["field3"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show field keys measurement with regex`, - command: `SHOW FIELD KEYS FROM /[cg]pu/`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["fieldKey"],"values":[["field1"],["field2"],["field3"]]},{"name":"gpu","columns":["fieldKey"],"values":[["field4"],["field5"],["field6"],["field7"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) 
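-
- // Boilerplate execution loop. As server_test.md notes, this is deliberately
- // not factored into a helper so a failure reports the proper line numbers.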
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_ContinuousQuery(t *testing.T) { - t.Skip() - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - runTest := func(test *Test, t *testing.T) { - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } - } - - // Start times of CQ intervals. - interval0 := time.Now().Add(-time.Second).Round(time.Second * 5) - interval1 := interval0.Add(-time.Second * 5) - interval2 := interval0.Add(-time.Second * 10) - interval3 := interval0.Add(-time.Second * 15) - - writes := []string{ - // Point too far in the past for CQ to pick up. - fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, interval3.Add(time.Second).UnixNano()), - - // Points two intervals ago. - fmt.Sprintf(`cpu,host=server01 value=100 %d`, interval2.Add(time.Second).UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, interval2.Add(time.Second*2).UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, interval2.Add(time.Second*3).UnixNano()), - - // Points one interval ago. - fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, interval1.Add(time.Second).UnixNano()), - fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, interval1.Add(time.Second*2).UnixNano()), - - // Points in the current interval. 
- fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, interval0.Add(time.Second).UnixNano()), - fmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, interval0.Add(time.Second*2).UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - test.addQueries([]*Query{ - &Query{ - name: `create another retention policy for CQ to write into`, - command: `CREATE RETENTION POLICY rp1 ON db0 DURATION 1h REPLICATION 1`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "create continuous query with backreference", - command: `CREATE CONTINUOUS QUERY "cq1" ON db0 BEGIN SELECT count(value) INTO "rp1".:MEASUREMENT FROM /[cg]pu/ GROUP BY time(5s) END`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: `create another retention policy for CQ to write into`, - command: `CREATE RETENTION POLICY rp2 ON db0 DURATION 1h REPLICATION 1`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "create continuous query with backreference and group by time", - command: `CREATE CONTINUOUS QUERY "cq2" ON db0 BEGIN SELECT count(value) INTO "rp2".:MEASUREMENT FROM /[cg]pu/ GROUP BY time(5s), * END`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: `show continuous queries`, - command: `SHOW CONTINUOUS QUERIES`, - exp: `{"results":[{"series":[{"name":"db0","columns":["name","query"],"values":[["cq1","CREATE CONTINUOUS QUERY cq1 ON db0 BEGIN SELECT count(value) INTO \"db0\".\"rp1\".:MEASUREMENT FROM \"db0\".\"rp0\"./[cg]pu/ GROUP BY time(5s) END"],["cq2","CREATE CONTINUOUS QUERY cq2 ON db0 BEGIN SELECT count(value) INTO \"db0\".\"rp2\".:MEASUREMENT FROM \"db0\".\"rp0\"./[cg]pu/ GROUP BY time(5s), * END"]]}]}]}`, - }, - }...) - - // Run first test to create CQs. - runTest(&test, t) - - // Trigger CQs to run. - u := fmt.Sprintf("%s/data/process_continuous_queries?time=%d", s.URL(), interval0.UnixNano()) - if _, err := s.HTTPPost(u, nil); err != nil { - t.Fatal(err) - } - - // Wait for CQs to run. TODO: fix this ugly hack - time.Sleep(time.Second * 5) - - // Setup tests to check the CQ results. 
- test2 := NewTest("db0", "rp1")
- test2.addQueries([]*Query{
- &Query{
- name: "check results of cq1",
- command: `SELECT * FROM "rp1"./[cg]pu/`,
- exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count","host","region","value"],"values":[["` + interval2.UTC().Format(time.RFC3339Nano) + `",3,null,null,null]]},{"name":"gpu","columns":["time","count","host","region","value"],"values":[["` + interval1.UTC().Format(time.RFC3339Nano) + `",2,null,null,null],["` + interval0.UTC().Format(time.RFC3339Nano) + `",1,null,null,null]]}]}]}`,
- params: url.Values{"db": []string{"db0"}},
- },
- // TODO: restore this test once this is fixed: https://github.com/influxdb/influxdb/issues/3968
- &Query{
- skip: true,
- name: "check results of cq2",
- command: `SELECT * FROM "rp2"./[cg]pu/`,
- exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count","host","region","value"],"values":[["` + interval2.UTC().Format(time.RFC3339Nano) + `",1,"server01","uswest",null],["` + interval2.UTC().Format(time.RFC3339Nano) + `",1,"server01","",null],["` + interval2.UTC().Format(time.RFC3339Nano) + `",1,"server01","useast",null]]},{"name":"gpu","columns":["time","count","host","region","value"],"values":[["` + interval1.UTC().Format(time.RFC3339Nano) + `",1,"server02","useast",null],["` + interval1.UTC().Format(time.RFC3339Nano) + `",1,"server03","caeast",null],["` + interval0.UTC().Format(time.RFC3339Nano) + `",1,"server03","caeast",null]]}]}]}`,
- params: url.Values{"db": []string{"db0"}},
- },
- }...)
-
- // Run second test to check CQ results.
- runTest(&test2, t)
-}
-
-// Tests that a known CQ query with concurrent writes does not deadlock the server
-func TestServer_ContinuousQuery_Deadlock(t *testing.T) {
-
- // Skip until #3517 & #3522 are merged
- t.Skip("Skipping CQ deadlock test")
- if testing.Short() {
- t.Skip("skipping CQ deadlock test")
- }
- t.Parallel()
- s := OpenServer(NewConfig(), "")
- defer func() {
- s.Close()
- // Nil the server so our deadlock detector goroutine can determine if we completed writes
- // without timing out
- s.Server = nil
- }()
-
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
- t.Fatal(err)
- }
- if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
- t.Fatal(err)
- }
-
- test := NewTest("db0", "rp0")
-
- test.addQueries([]*Query{
- &Query{
- name: "create continuous query",
- command: `CREATE CONTINUOUS QUERY "my.query" ON db0 BEGIN SELECT sum(visits) as visits INTO test_1m FROM myseries GROUP BY time(1m), host END`,
- exp: `{"results":[{}]}`,
- },
- }...)
-
- for i, query := range test.queries {
- if i == 0 {
- if err := test.init(s); err != nil {
- t.Fatalf("test init failed: %s", err)
- }
- }
- if query.skip {
- t.Logf("SKIP:: %s", query.name)
- continue
- }
- if err := query.Execute(s); err != nil {
- t.Error(query.Error(err))
- } else if !query.success() {
- t.Error(query.failureMessage())
- }
- }
-
- // Deadlock detector. If the deadlock is fixed, this test should complete all the writes in ~2.5 seconds (with artificial delays
- // added). After 10 seconds, if the server has not been closed then we hit the deadlock bug.
- iterations := 0
- go func(s *Server) {
- <-time.After(10 * time.Second)
-
- // If the server is not nil then the test is still running and stuck. We panic to avoid
- // having the whole test suite hang indefinitely.
- if s.Server != nil {
- panic("possible deadlock. writes did not complete in time")
- }
- }(s)
-
- for {
- // After the second write, if the deadlock exists, we'll get a write timeout and
- // all subsequent writes will time out.
- if iterations > 5 {
- break
- }
- writes := []string{}
- for i := 0; i < 1000; i++ {
- writes = append(writes, fmt.Sprintf(`myseries,host=host-%d visits=1i`, i))
- }
- write := strings.Join(writes, "\n")
-
- if _, err := s.Write(test.db, test.rp, write, test.params); err != nil {
- t.Fatal(err)
- }
- iterations++
- time.Sleep(500 * time.Millisecond)
- }
-}
-
-func TestServer_Query_EvilIdentifiers(t *testing.T) {
- t.Parallel()
- s := OpenServer(NewConfig(), "")
- defer s.Close()
-
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
- t.Fatal(err)
- }
- if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
- t.Fatal(err)
- }
-
- test := NewTest("db0", "rp0")
- test.write = fmt.Sprintf("cpu select=1,in-bytes=2 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano())
-
- test.addQueries([]*Query{
- &Query{
- name: `query evil identifiers`,
- command: `SELECT "select", "in-bytes" FROM cpu`,
- exp: `{"results":[{"series":[{"name":"cpu","columns":["time","select","in-bytes"],"values":[["2000-01-01T00:00:00Z",1,2]]}]}]}`,
- params: url.Values{"db": []string{"db0"}},
- },
- }...)
-
- for i, query := range test.queries {
- if i == 0 {
- if err := test.init(s); err != nil {
- t.Fatalf("test init failed: %s", err)
- }
- }
- if query.skip {
- t.Logf("SKIP:: %s", query.name)
- continue
- }
- if err := query.Execute(s); err != nil {
- t.Error(query.Error(err))
- } else if !query.success() {
- t.Error(query.failureMessage())
- }
- }
-}
-
-func TestServer_Query_OrderByTime(t *testing.T) {
- t.Parallel()
- s := OpenServer(NewConfig(), "")
- defer s.Close()
-
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
- t.Fatal(err)
- }
- if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
- t.Fatal(err)
- }
-
- writes := []string{
- fmt.Sprintf(`cpu,host=server1 value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()),
- fmt.Sprintf(`cpu,host=server1 value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()),
- fmt.Sprintf(`cpu,host=server1 value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()),
- }
-
- test := NewTest("db0", "rp0")
- test.write = strings.Join(writes, "\n")
-
- test.addQueries([]*Query{
- &Query{
- name: "order on points",
- params: url.Values{"db": []string{"db0"}},
- command: `select value from "cpu" ORDER BY time DESC`,
- exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:03Z",3],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:01Z",1]]}]}]}`,
- },
- }...)
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_FieldWithMultiplePeriods(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu foo.bar.baz=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "baseline", - params: url.Values{"db": []string{"db0"}}, - command: `select * from cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, - }, - &Query{ - name: "select field with periods", - params: url.Values{"db": []string{"db0"}}, - command: `select "foo.bar.baz" from cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_FieldWithMultiplePeriodsMeasurementPrefixMatch(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`foo foo.bar.baz=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "baseline", - params: url.Values{"db": []string{"db0"}}, - command: `select * from foo`, - exp: `{"results":[{"series":[{"name":"foo","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, - }, - &Query{ - name: "select field with periods", - params: url.Values{"db": []string{"db0"}}, - command: `select "foo.bar.baz" from foo`, - exp: `{"results":[{"series":[{"name":"foo","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, - }, - }...) 
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_IntoTarget(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`foo value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`foo value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`foo value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - fmt.Sprintf(`foo value=4 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), - fmt.Sprintf(`foo value=4,foobar=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "into", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * INTO baz FROM foo`, - exp: `{"results":[{"series":[{"name":"result","columns":["time","written"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, - }, - &Query{ - name: "confirm results", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM baz`, - exp: `{"results":[{"series":[{"name":"baz","columns":["time","foobar","value"],"values":[["2000-01-01T00:00:00Z",null,1],["2000-01-01T00:00:10Z",null,2],["2000-01-01T00:00:20Z",null,3],["2000-01-01T00:00:30Z",null,4],["2000-01-01T00:00:40Z",3,4]]}]}]}`, - }, - }...) - - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// This test reproduced a data race with closing the -// Subscriber points channel while writes were in-flight in the PointsWriter. -func TestServer_ConcurrentPointsWriter_Subscriber(t *testing.T) { - t.Parallel() - s := OpenDefaultServer(NewConfig(), "") - defer s.Close() - - // goroutine to write points - done := make(chan struct{}) - go func() { - for { - select { - case <-done: - return - default: - wpr := &cluster.WritePointsRequest{ - Database: "db0", - RetentionPolicy: "rp0", - } - s.PointsWriter.WritePoints(wpr) - } - } - }() - - time.Sleep(10 * time.Millisecond) - - close(done) - // Race occurs on s.Close() -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.md deleted file mode 100644 index 8df37e333..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.md +++ /dev/null @@ -1,150 +0,0 @@ -# Server Integration Tests - -Currently, the file `server_test.go` has integration tests for single node scenarios. 
-At some point we'll need to add cluster tests, and may add them in a different file, or
-rename `server_test.go` to `server_single_node_test.go` or something like that.
-
-## What is in a test?
-
-Each test is broken apart effectively into the following areas:
-
-- Write sample data
-- Use cases for a table-driven test, each including a command (typically a query) and an expected result.
-
-When each test runs it does the following:
-
-- init: determines if there are any writes and if so, writes them to the in-memory database
-- queries: iterates through each query, executing the command and comparing the results to the expected result.
-
-## Idempotent - Allows for parallel tests
-
-Each test should be `idempotent`, meaning that its data will not be affected by other tests or by other use cases within the table tests themselves.
-This allows for parallel testing, keeping the test suite's total execution time very low.
-
-### Basic sample test
-
-```go
-// Ensure the server can have a database with multiple measurements.
-func TestServer_Query_Multiple_Measurements(t *testing.T) {
- t.Parallel()
- s := OpenServer(NewConfig(), "")
- defer s.Close()
-
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
- t.Fatal(err)
- }
-
- // Make sure we do writes for measurements that will span across shards
- writes := []string{
- fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
- fmt.Sprintf("cpu1,host=server02 value=50,core=2 %d", mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()),
- }
- test := NewTest("db0", "rp0")
- test.write = strings.Join(writes, "\n")
-
- test.addQueries([]*Query{
- &Query{
- name: "measurement in one shard but not another shouldn't panic server",
- command: `SELECT host,value FROM db0.rp0.cpu`,
- exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",100]]}]}]}`,
- },
- }...)
-
- if err := test.init(s); err != nil {
- t.Fatalf("test init failed: %s", err)
- }
-
- for _, query := range test.queries {
- if query.skip {
- t.Logf("SKIP:: %s", query.name)
- continue
- }
- if err := query.Execute(s); err != nil {
- t.Error(query.Error(err))
- } else if !query.success() {
- t.Error(query.failureMessage())
- }
- }
-}
-```
-
-Let's break this down:
-
-In this test, we first tell it to run in parallel with the `t.Parallel()` call.
-
-We then open a new server with:
-
-```go
-s := OpenServer(NewConfig(), "")
-defer s.Close()
-```
-
-If needed, we create a database and default retention policy. This is usually needed
-when inserting and querying data. This is not needed if you are testing commands like `CREATE DATABASE`, `SHOW DIAGNOSTICS`, etc.
-
-```go
-if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
- t.Fatal(err)
-}
-```
-
-Next, set up the write data you need:
-
-```go
-writes := []string{
- fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
- fmt.Sprintf("cpu1,host=server02 value=50,core=2 %d", mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()),
-}
-```
-Create a new test with the database and retention policy:
-
-```go
-test := NewTest("db0", "rp0")
-```
-
-Send in the writes:
-```go
-test.write = strings.Join(writes, "\n")
-```
-
-Add some queries (the second one is mocked out to show how to add more than one):
-
-```go
-test.addQueries([]*Query{
- &Query{
- name: "measurement in one shard but not another shouldn't panic server",
- command: `SELECT host,value FROM db0.rp0.cpu`,
- exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",100]]}]}]}`,
- },
- &Query{
- name: "another test here...",
- command: `Some query command`,
- exp: `the expected results`,
- },
-}...)
-```
-
-The rest of the code is boilerplate execution code. It is purposefully not refactored out to a helper
-to make sure a test failure reports the proper lines for debugging purposes.
-
-#### Running the tests
-
-To run the tests:
-
-```sh
-go test ./cmd/influxd/run -parallel 500 -timeout 10s
-```
-
-#### Running a specific test
-
-```sh
-go test ./cmd/influxd/run -parallel 500 -timeout 10s -run TestServer_Query_Fill
-```
-
-#### Verbose feedback
-
-By default, all logs are silenced when testing. If you pass in the `-v` flag, the test suite becomes verbose, and enables all logging in the system:
-
-```sh
-go test ./cmd/influxd/run -parallel 500 -timeout 10s -run TestServer_Query_Fill -v
-```
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/errors.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/errors.go
deleted file mode 100644
index 7627ee2fd..000000000
--- a/Godeps/_workspace/src/github.com/influxdb/influxdb/errors.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package influxdb
-
-import (
- "errors"
- "fmt"
- "strings"
-)
-
-var (
- // ErrFieldsRequired is returned when a point does not have any fields.
- ErrFieldsRequired = errors.New("fields required")
-
- // ErrFieldTypeConflict is returned when a new field already exists with a different type.
- ErrFieldTypeConflict = errors.New("field type conflict")
-)
-
-// ErrDatabaseNotFound indicates that a database operation failed on the
-// specified database because the specified database does not exist.
-func ErrDatabaseNotFound(name string) error { return fmt.Errorf("database not found: %s", name) }
-
-// ErrRetentionPolicyNotFound indicates that the named retention policy could
-// not be found in the database.
-func ErrRetentionPolicyNotFound(name string) error {
- return fmt.Errorf("retention policy not found: %s", name)
-}
-
-// IsClientError indicates whether an error is a known client error.
-func IsClientError(err error) bool { - if err == nil { - return false - } - - if err == ErrFieldsRequired { - return true - } - if err == ErrFieldTypeConflict { - return true - } - - if strings.Contains(err.Error(), ErrFieldTypeConflict.Error()) { - return true - } - - return false -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/.rvmrc b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/.rvmrc deleted file mode 100644 index a9c1a9ca3..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/.rvmrc +++ /dev/null @@ -1 +0,0 @@ -rvm use ruby-2.1.0@burn-in --create diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/Gemfile b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/Gemfile deleted file mode 100644 index b1816e8b6..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/Gemfile +++ /dev/null @@ -1,4 +0,0 @@ -source 'https://rubygems.org' - -gem "colorize" -gem "influxdb" diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/Gemfile.lock b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/Gemfile.lock deleted file mode 100644 index 9e721c3a7..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/Gemfile.lock +++ /dev/null @@ -1,14 +0,0 @@ -GEM - remote: https://rubygems.org/ - specs: - colorize (0.6.0) - influxdb (0.0.16) - json - json (1.8.1) - -PLATFORMS - ruby - -DEPENDENCIES - colorize - influxdb diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/burn-in.rb b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/burn-in.rb deleted file mode 100644 index 1d44bc2c0..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/burn-in.rb +++ /dev/null @@ -1,79 +0,0 @@ -require "influxdb" -require "colorize" -require "benchmark" - -require_relative "log" -require_relative "random_gaussian" - -BATCH_SIZE = 10_000 - -Log.info "Starting burn-in suite" -master = InfluxDB::Client.new -master.delete_database("burn-in") rescue nil -master.create_database("burn-in") -master.create_database_user("burn-in", "user", "pass") - -master.database = "burn-in" -# master.query "select * from test1 into test2;" -# master.query "select count(value) from test1 group by time(1m) into test2;" - -influxdb = InfluxDB::Client.new "burn-in", username: "user", password: "pass" - -Log.success "Connected to server #{influxdb.host}:#{influxdb.port}" - -Log.log "Creating RandomGaussian(500, 25)" -gaussian = RandomGaussian.new(500, 25) -point_count = 0 - -while true - Log.log "Generating 10,000 points.." - points = [] - BATCH_SIZE.times do |n| - points << {value: gaussian.rand.to_i.abs} - end - point_count += points.length - - Log.info "Sending points to server.." - begin - st = Time.now - foo = influxdb.write_point("test1", points) - et = Time.now - Log.log foo.inspect - Log.log "#{et-st} seconds elapsed" - Log.success "Write successful." 
- rescue => e - Log.failure "Write failed:" - Log.log e - end - sleep 0.5 - - Log.info "Checking regular points" - st = Time.now - response = influxdb.query("select count(value) from test1;") - et = Time.now - - Log.log "#{et-st} seconds elapsed" - - response_count = response["test1"].first["count"] - if point_count == response_count - Log.success "Point counts match: #{point_count} == #{response_count}" - else - Log.failure "Point counts don't match: #{point_count} != #{response_count}" - end - - # Log.info "Checking continuous query points for test2" - # st = Time.now - # response = influxdb.query("select count(value) from test2;") - # et = Time.now - - # Log.log "#{et-st} seconds elapsed" - - # response_count = response["test2"].first["count"] - # if point_count == response_count - # Log.success "Point counts match: #{point_count} == #{response_count}" - # else - # Log.failure "Point counts don't match: #{point_count} != #{response_count}" - # end -end - - diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/log.rb b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/log.rb deleted file mode 100644 index 0f70d7633..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/log.rb +++ /dev/null @@ -1,23 +0,0 @@ -module Log - def self.info(msg) - print Time.now.strftime("%r") + " | " - puts msg.to_s.colorize(:yellow) - end - - def self.success(msg) - print Time.now.strftime("%r") + " | " - puts msg.to_s.colorize(:green) - end - - def self.failure(msg) - print Time.now.strftime("%r") + " | " - puts msg.to_s.colorize(:red) - end - - def self.log(msg) - print Time.now.strftime("%r") + " | " - puts msg.to_s - end -end - - diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_gaussian.rb b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_gaussian.rb deleted file mode 100644 index 51d6c3c04..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_gaussian.rb +++ /dev/null @@ -1,31 +0,0 @@ -class RandomGaussian - def initialize(mean, stddev, rand_helper = lambda { Kernel.rand }) - @rand_helper = rand_helper - @mean = mean - @stddev = stddev - @valid = false - @next = 0 - end - - def rand - if @valid then - @valid = false - return @next - else - @valid = true - x, y = self.class.gaussian(@mean, @stddev, @rand_helper) - @next = y - return x - end - end - - private - def self.gaussian(mean, stddev, rand) - theta = 2 * Math::PI * rand.call - rho = Math.sqrt(-2 * Math.log(1 - rand.call)) - scale = stddev * rho - x = mean + scale * Math.cos(theta) - y = mean + scale * Math.sin(theta) - return x, y - end -end diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_points.rb b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_points.rb deleted file mode 100644 index 93bc8314f..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_points.rb +++ /dev/null @@ -1,29 +0,0 @@ -require "influxdb" - -ONE_WEEK_IN_SECONDS = 7*24*60*60 -NUM_POINTS = 10_000 -BATCHES = 100 - -master = InfluxDB::Client.new -master.delete_database("ctx") rescue nil -master.create_database("ctx") - -influxdb = InfluxDB::Client.new "ctx" -influxdb.time_precision = "s" - -names = ["foo", "bar", "baz", "quu", "qux"] - -st = Time.now -BATCHES.times do |m| - points = [] - - puts "Writing #{NUM_POINTS} points, time ##{m}.." 
- NUM_POINTS.times do |n|
- timestamp = Time.now.to_i - rand(ONE_WEEK_IN_SECONDS)
- points << {value: names.sample, time: timestamp}
- end
-
- influxdb.write_point("ct1", points)
-end
-puts st
-puts Time.now
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/config.sample.toml b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/config.sample.toml
deleted file mode 100644
index 848a0647a..000000000
--- a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/config.sample.toml
+++ /dev/null
@@ -1,322 +0,0 @@
-### Welcome to the InfluxDB configuration file.
-
-# Once every 24 hours InfluxDB will report anonymous data to m.influxdb.com
-# The data includes raft id (random 8 bytes), os, arch, version, and metadata.
-# We don't track IP addresses of servers reporting. This is only used
-# to track the number of instances running and the versions, which
-# is very helpful for us.
-# Change this option to true to disable reporting.
-reporting-disabled = false
-
-###
-### Enterprise registration control
-###
-
-[registration]
-# enabled = true
-# url = "https://enterprise.influxdata.com" # The Enterprise server URL
-# token = "" # Registration token for Enterprise server
-
-###
-### [meta]
-###
-### Controls the parameters for the Raft consensus group that stores metadata
-### about the InfluxDB cluster.
-###
-
-[meta]
- dir = "/var/opt/influxdb/meta"
- hostname = "localhost"
- bind-address = ":8088"
- retention-autocreate = true
- election-timeout = "1s"
- heartbeat-timeout = "1s"
- leader-lease-timeout = "500ms"
- commit-timeout = "50ms"
- cluster-tracing = false
-
- # If enabled, when a Raft cluster loses a peer due to a `DROP SERVER` command,
- # the leader will automatically ask a non-raft peer node to promote to a raft
- # peer. This only happens if there is a non-raft peer node available to promote.
- # This setting only affects the local node, so to ensure it operates correctly, be sure to set
- # it in the config of every node.
- raft-promotion-enabled = true
-
-###
-### [data]
-###
-### Controls where the actual shard data for InfluxDB lives and how it is
-### flushed from the WAL. "dir" may need to be changed to a suitable place
-### for your system, but the WAL settings are an advanced configuration. The
-### defaults should work for most systems.
-###
-
-[data]
- dir = "/var/opt/influxdb/data"
-
- # Controls the engine type for new shards. Options are b1, bz1, or tsm1.
- # b1 is the 0.9.2 storage engine, bz1 is the 0.9.3 and 0.9.4 engine.
- # tsm1 is the 0.9.5 engine and is currently EXPERIMENTAL. Until 0.9.5 is
- # actually released, data written into a tsm1 engine may need to be wiped
- # between upgrades.
- # engine = "bz1"
-
- # The following WAL settings are for the b1 storage engine used in 0.9.2. They won't
- # apply to any new shards created after upgrading to a version > 0.9.3.
- max-wal-size = 104857600 # Maximum size the WAL can reach before a flush. Defaults to 100MB.
- wal-flush-interval = "10m" # Maximum time data can sit in WAL before a flush.
- wal-partition-flush-delay = "2s" # The delay time between each WAL partition being flushed.
-
- # These are the WAL settings for the storage engine >= 0.9.3
- wal-dir = "/var/opt/influxdb/wal"
- wal-enable-logging = true
-
- # When a series in the WAL in-memory cache reaches this size in bytes it is marked as ready to
- # flush to the index
- # wal-ready-series-size = 25600
-
- # Flush and compact a partition once this ratio of series are over the ready size
- # wal-compaction-threshold = 0.6
-
- # Force a flush and compaction if any series in a partition gets above this size in bytes
- # wal-max-series-size = 2097152
-
- # Force a flush of all series and full compaction if there have been no writes in this
- # amount of time. This is useful for ensuring that shards that are cold for writes don't
- # keep a bunch of data cached in memory and in the WAL.
- # wal-flush-cold-interval = "10m"
-
- # Force a partition to flush its largest series if it reaches this approximate size in
- # bytes. Remember there are 5 partitions so you'll need at least 5x this amount of memory.
- # The more memory you have, the bigger this can be.
- # wal-partition-size-threshold = 20971520
-
- # Whether queries should be logged before execution. Very useful for troubleshooting, but will
- # log any sensitive data contained within a query.
- # query-log-enabled = true
-
-###
-### [hinted-handoff]
-###
-### Controls the hinted handoff feature, which allows nodes to temporarily
-### store queued data when one node of a cluster is down for a short period
-### of time.
-###
-
-[hinted-handoff]
- enabled = true
- dir = "/var/opt/influxdb/hh"
- max-size = 1073741824
- max-age = "168h"
- retry-rate-limit = 0
-
- # Hinted handoff will start retrying writes to down nodes at a rate of once per second.
- # If any error occurs, it will back off exponentially until the interval
- # reaches retry-max-interval. Once writes to all nodes are successfully completed the
- # interval will reset to retry-interval.
- retry-interval = "1s"
- retry-max-interval = "1m"
-
- # Interval between running checks for data that should be purged. Data is purged from
- # hinted-handoff queues for two reasons: 1) the data is older than the max age, or
- # 2) the target node has been dropped from the cluster. Data is never dropped until
- # it has reached max-age, however, whether the target node was dropped or not.
- purge-interval = "1h"
-
-###
-### [cluster]
-###
-### Controls non-Raft cluster behavior, which generally includes how data is
-### shared across shards.
-###
-
-[cluster]
- shard-writer-timeout = "10s" # The time within which a shard must respond to a write.
- write-timeout = "5s" # The time within which a write operation must complete on the cluster.
-
-###
-### [retention]
-###
-### Controls the enforcement of retention policies for evicting old data.
-###
-
-[retention]
- enabled = true
- check-interval = "30m"
-
-###
-### [shard-precreation]
-###
-### Controls the precreation of shards, so they are created before data arrives.
-### Only shards that will exist in the future, at time of creation, are precreated.
-
-[shard-precreation]
- enabled = true
- check-interval = "10m"
- advance-period = "30m"
-
-###
-### Controls the system self-monitoring, statistics and diagnostics.
-###
-### The internal database for monitoring data is created automatically
-### if it does not already exist. The target retention policy within this database
-### is called 'monitor' and is also created with a retention period of 7 days
-### and a replication factor of 1, if it does not exist. In all cases
-### this retention policy is configured as the default for the database.
-
-[monitor]
- store-enabled = true # Whether to record statistics internally.
- store-database = "_internal" # The destination database for recorded statistics
- store-interval = "10s" # The interval at which to record statistics
-
-###
-### [admin]
-###
-### Controls the availability of the built-in, web-based admin interface. If HTTPS is
-### enabled for the admin interface, HTTPS must also be enabled on the [http] service.
-###
-
-[admin]
- enabled = true
- bind-address = ":8083"
- https-enabled = false
- https-certificate = "/etc/ssl/influxdb.pem"
-
-###
-### [http]
-###
-### Controls how the HTTP endpoints are configured. These are the primary
-### mechanism for getting data into and out of InfluxDB.
-###
-
-[http]
- enabled = true
- bind-address = ":8086"
- auth-enabled = false
- log-enabled = true
- write-tracing = false
- pprof-enabled = false
- https-enabled = false
- https-certificate = "/etc/ssl/influxdb.pem"
-
-###
-### [[graphite]]
-###
-### Controls one or many listeners for Graphite data.
-###
-
-[[graphite]]
- enabled = false
- # database = "graphite"
- # bind-address = ":2003"
- # protocol = "tcp"
- # consistency-level = "one"
- # name-separator = "."
-
- # These next lines control how batching works. You should have this enabled,
- # otherwise you could get dropped metrics or poor performance. Batching
- # will buffer points in memory if you have many coming in.
-
- # batch-size = 1000 # will flush if this many points get buffered
- # batch-pending = 5 # number of batches that may be pending in memory
- # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
- # udp-read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
-
- ## "name-schema" configures tag names for parsing the metric name from graphite protocol;
- ## separated by `name-separator`.
- ## The "measurement" tag is special and the corresponding field will become
- ## the name of the metric.
- ## e.g. "type.host.measurement.device" will parse "server.localhost.cpu.cpu0" as
- ## {
- ## measurement: "cpu",
- ## tags: {
- ## "type": "server",
- ## "host": "localhost",
- ## "device": "cpu0"
- ## }
- ## }
- # name-schema = "type.host.measurement.device"
-
- ## If set to true, when the input metric name has more fields than `name-schema` specifies,
- ## the extra fields will be ignored.
- ## Otherwise an error will be logged and the metric rejected.
- # ignore-unnamed = true
-
-###
-### [collectd]
-###
-### Controls the listener for collectd data.
-###
-
-[collectd]
- enabled = false
- # bind-address = ""
- # database = ""
- # typesdb = ""
-
- # These next lines control how batching works. You should have this enabled,
- # otherwise you could get dropped metrics or poor performance. Batching
- # will buffer points in memory if you have many coming in.
-
- # batch-size = 1000 # will flush if this many points get buffered
- # batch-pending = 5 # number of batches that may be pending in memory
- # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
- # read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
-
-###
-### [opentsdb]
-###
-### Controls the listener for OpenTSDB data.
-###
-
-[opentsdb]
- enabled = false
- # bind-address = ":4242"
- # database = "opentsdb"
- # retention-policy = ""
- # consistency-level = "one"
- # tls-enabled = false
- # certificate = ""
-
- # These next lines control how batching works. You should have this enabled,
- # otherwise you could get dropped metrics or poor performance. Only
- # metrics received over the telnet protocol undergo batching.
-
- # batch-size = 1000 # will flush if this many points get buffered
- # batch-pending = 5 # number of batches that may be pending in memory
- # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
-
-###
-### [[udp]]
-###
-### Controls the listeners for InfluxDB line protocol data via UDP.
-###
-
-[[udp]]
- enabled = false
- # bind-address = ""
- # database = "udp"
- # retention-policy = ""
-
- # These next lines control how batching works. You should have this enabled,
- # otherwise you could get dropped metrics or poor performance. Batching
- # will buffer points in memory if you have many coming in.
-
- # batch-size = 1000 # will flush if this many points get buffered
- # batch-pending = 5 # number of batches that may be pending in memory
- # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
- # read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
-
-###
-### [continuous_queries]
-###
-### Controls how continuous queries are run within InfluxDB.
-###
-
-[continuous_queries]
- log-enabled = true
- enabled = true
- recompute-previous-n = 2
- recompute-no-older-than = "10m"
- compute-runs-per-interval = 10
- compute-no-more-than = "2m"
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/importer/README.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/importer/README.md
deleted file mode 100644
index b8cd9ad8e..000000000
--- a/Godeps/_workspace/src/github.com/influxdb/influxdb/importer/README.md
+++ /dev/null
@@ -1,193 +0,0 @@
-# Import/Export
-
-## Exporting from 0.8.9
-
-Version `0.8.9` of InfluxDB adds support to export your data to a format that can be imported into `0.9.3` and later.
-
-Note that `0.8.9` can be found here:
-
-```
-http://get.influxdb.org.s3.amazonaws.com/influxdb_0.8.9_amd64.deb
-http://get.influxdb.org.s3.amazonaws.com/influxdb-0.8.9-1.x86_64.rpm
-```
-
-### Design
-
-`0.8.9` exports raw data to a flat file that includes two sections, `DDL` and `DML`. You can choose to export them independently (see below).
-
-The `DDL` section contains the SQL commands to create databases and retention policies. The `DML` section is [line protocol](https://github.com/influxdb/influxdb/blob/master/tsdb/README.md) and can be directly posted to the [http endpoint](https://influxdb.com/docs/v0.9/guides/writing_data.html) in `0.9`. Remember that batching is important and we don't recommend batch sizes over 5k.
-
-You need to specify a database and shard group when you export.
-
-To list out your shards, use the following HTTP endpoint:
-
-`/cluster/shard_spaces`
-
-example:
-```sh
-http://username:password@localhost:8086/cluster/shard_spaces
-```
-
-Then, to export a database with the name "metrics" and a shard space with the name "default", issue the following curl command:
-
-```sh
-curl -o export http://username:password@localhost:8086/export/metrics/default
-```
-
-Compression is supported, and will result in a significantly smaller file size.
-
-Use the following command for compression:
-```sh
-curl -o export.gz --compressed http://username:password@localhost:8086/export/metrics/default
-```
-
-You can also export just the `DDL` with this option:
-
-```sh
-curl -o export.ddl http://username:password@localhost:8086/export/metrics/default?l=ddl
-```
-
-Or just the `DML` with this option:
-
-```sh
-curl -o export.dml.gz --compressed http://username:password@localhost:8086/export/metrics/default?l=dml
-```
-
-### Assumptions
-
-- Series name mapping follows these [guidelines](https://influxdb.com/docs/v0.8/advanced_topics/schema_design.html)
-- Database name will map directly from `0.8` to `0.9`
-- Shard Spaces map to Retention Policies
-- Shard Space Duration is ignored, as in `0.9` we determine shard size automatically
-- Regex is used to match the correct series names and only exports that data for the database
-- Duration becomes the new Retention Policy duration
-
-- Users are not migrated due to inability to get passwords. Anyone using users will need to manually set these back up in `0.9`
-
-### Upgrade Recommendations
-
-It's recommended that you upgrade to `0.9.3` first and have all your writes going there. Then, on the `0.8.X` instances, upgrade to `0.8.9`.
-
-It is important that when exporting you change your config so the http endpoints do not time out. To do so, make this change in your config:
-
-```toml
-# Configure the http api
-[api]
-read-timeout = "0s"
-```
-
-### Exceptions
-
-If a series can't be exported to tags based on the guidelines mentioned above,
-we will insert the entire series name as the measurement name. You can either
-allow that to import into the new InfluxDB instance, or you can do your own
-data massage on it prior to importing it.
-
-For example, if you have the following series name:
-
-```
-metric.disk.c.host.server01.single
-```
-
-It will be exported exactly as that, as the measurement name with no tags:
-
-```
-metric.disk.c.host.server01.single
-```
-
-### Export Metrics
-
-When you export, you will now get comments inline in the `DML`:
-
-`# Found 999 Series for export`
-
-As well as count totals for each series exported:
-
-`# Series FOO - Points Exported: 999`
-
-With a total at the bottom:
-
-`# Points Exported: 999`
-
-You can grep the exported file to get all the export metrics:
-
-`cat myexport | grep Exported`
-
-## Importing
-
-Version `0.9.3` of InfluxDB adds support to import your data from version `0.8.9`.
-
-## Caveats
-
-For the export/import to work, all requisites have to be met. For export, all series names in `0.8` should be in the following format:
-
-```
-<tag name>.<tag value>.<tag name>.<tag value>.<measurement>
-```
-for example:
-```
-az.us-west-1.host.serverA.cpu
-```
-or any number of tags
-```
-building.2.temperature
-```
-
-Additionally, the fields need to have a consistent type (all float64, int64, etc.) for every write in `0.8`. Otherwise they have the potential to fail writes in the import.
-See below for more information.
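-
-As a rough illustration of the naming convention above (a minimal sketch, not part of the exporter; `splitSeriesName` is a hypothetical helper), the alternating tag-name/tag-value segments split out like this:
-
-```go
-package main
-
-import (
-    "fmt"
-    "strings"
-)
-
-// splitSeriesName mimics the 0.8 -> 0.9 mapping described above:
-// alternating tag-name/tag-value segments, with the final segment
-// becoming the measurement name.
-func splitSeriesName(series string) (string, map[string]string) {
-    parts := strings.Split(series, ".")
-    measurement := parts[len(parts)-1]
-    tags := make(map[string]string)
-    for j := 0; j+1 < len(parts)-1; j += 2 {
-        tags[parts[j]] = parts[j+1]
-    }
-    return measurement, tags
-}
-
-func main() {
-    m, tags := splitSeriesName("az.us-west-1.host.serverA.cpu")
-    fmt.Println(m, tags) // cpu map[az:us-west-1 host:serverA]
-}
-```
-
-Series names that don't fit this shape are handled as described in the Exceptions section above: the whole name is exported verbatim as the measurement name.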
-
-## Running the import command
-
-To import via the CLI, you can specify the following command:
-
-```sh
-influx -import -path=metrics-default.gz -compressed
-```
-
-If the file is not compressed you can issue it without the `-compressed` flag:
-
-```sh
-influx -import -path=metrics-default
-```
-
-To redirect failed import lines to another file, run this command:
-
-```sh
-influx -import -path=metrics-default.gz -compressed > failures
-```
-
-The import will use the line protocol in batches of 5,000 lines per batch when sending data to the server.
-
-### Throttling the import
-
-If you need to throttle the import so the database has time to ingest, you can use the `-pps` flag. This will limit the points per second that will be sent to the server.
-
-```sh
-influx -import -path=metrics-default.gz -compressed -pps 50000 > failures
-```
-
-This states that you don't want more than 50,000 points per second written to the database. Due to the processing that is taking place, however, you will likely never get exactly 50,000 PPS; more like 35,000 PPS, etc.
-
-## Understanding the results of the import
-
-During the import, a status message will write out for every 100,000 points imported and report stats on the progress of the import:
-
-```
-2015/08/21 14:48:01 Processed 3100000 lines.  Time elapsed: 56.740578415s.  Points per second (PPS): 54634
-```
-
-The batch will give some basic stats when finished:
-
-```sh
-2015/07/29 23:15:20 Processed 2 commands
-2015/07/29 23:15:20 Processed 70207923 inserts
-2015/07/29 23:15:20 Failed 29785000 inserts
-```
-
-Most inserts fail due to the following types of error:
-
-```sh
-2015/07/29 22:18:28 error writing batch:  write failed: field type conflict: input field "value" on measurement "metric" is type float64, already exists as type integer
-```
-
-This is because in `0.8` a field could get created and saved as int or float types for independent writes. In `0.9` the field has to have a consistent type.
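-
-If you want to catch these conflicts before kicking off a long import, a rough pre-flight check along these lines can help. This is a minimal sketch, not part of the importer: it assumes fields contain no escaped spaces or commas, and it only applies the simple type rules of the line protocol (quotes for strings, trailing `i` for integers, `true`/`false` for booleans, floats otherwise):
-
-```go
-package main
-
-import (
-    "bufio"
-    "fmt"
-    "os"
-    "strings"
-)
-
-// fieldType guesses the line-protocol type of a field value.
-func fieldType(v string) string {
-    switch {
-    case strings.HasPrefix(v, `"`):
-        return "string"
-    case strings.HasSuffix(v, "i"):
-        return "integer"
-    case v == "true" || v == "false":
-        return "boolean"
-    default:
-        return "float"
-    }
-}
-
-func main() {
-    seen := map[string]string{} // "measurement.field" -> type
-    scanner := bufio.NewScanner(os.Stdin)
-    for scanner.Scan() {
-        line := scanner.Text()
-        // Skip comments and blank lines, as the importer does
-        if line == "" || strings.HasPrefix(line, "#") {
-            continue
-        }
-        // Naive line-protocol split: measurement[,tags] fields [timestamp]
-        sections := strings.Fields(line)
-        if len(sections) < 2 {
-            continue
-        }
-        measurement := strings.SplitN(sections[0], ",", 2)[0]
-        for _, f := range strings.Split(sections[1], ",") {
-            kv := strings.SplitN(f, "=", 2)
-            if len(kv) != 2 {
-                continue
-            }
-            key := measurement + "." + kv[0]
-            t := fieldType(kv[1])
-            if prev, ok := seen[key]; ok && prev != t {
-                fmt.Printf("conflict: %s is %s, already seen as %s\n", key, t, prev)
-            }
-            seen[key] = t
-        }
-    }
-}
-```
-
-Something like `gzip -dc metrics-default.gz | go run conflictcheck.go` would then give a rough count of the fields that are going to collide before you commit to a multi-hour import.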
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/importer/v8/importer.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/importer/v8/importer.go
deleted file mode 100644
index 86e998fcd..000000000
--- a/Godeps/_workspace/src/github.com/influxdb/influxdb/importer/v8/importer.go
+++ /dev/null
@@ -1,248 +0,0 @@
-package v8
-
-import (
-    "bufio"
-    "compress/gzip"
-    "fmt"
-    "io"
-    "log"
-    "net/url"
-    "os"
-    "strings"
-    "time"
-
-    "github.com/influxdb/influxdb/client"
-)
-
-const batchSize = 5000
-
-// Config is the config used to initialize an Importer
-type Config struct {
-    Username         string
-    Password         string
-    URL              url.URL
-    Precision        string
-    WriteConsistency string
-    Path             string
-    Version          string
-    Compressed       bool
-    PPS              int
-}
-
-// NewConfig returns an initialized *Config
-func NewConfig() *Config {
-    return &Config{}
-}
-
-// Importer is the importer used for importing 0.8 data
-type Importer struct {
-    client                *client.Client
-    database              string
-    retentionPolicy       string
-    config                *Config
-    batch                 []string
-    totalInserts          int
-    failedInserts         int
-    totalCommands         int
-    throttlePointsWritten int
-    lastWrite             time.Time
-    throttle              *time.Ticker
-}
-
-// NewImporter will return an initialized Importer struct
-func NewImporter(config *Config) *Importer {
-    return &Importer{
-        config: config,
-        batch:  make([]string, 0, batchSize),
-    }
-}
-
-// Import processes the specified file in the Config and writes the data to the databases in chunks specified by batchSize
-func (i *Importer) Import() error {
-    // Create a client and try to connect
-    config := client.NewConfig()
-    config.URL = i.config.URL
-    config.Username = i.config.Username
-    config.Password = i.config.Password
-    config.UserAgent = fmt.Sprintf("influxDB importer/%s", i.config.Version)
-    cl, err := client.NewClient(config)
-    if err != nil {
-        return fmt.Errorf("could not create client %s", err)
-    }
-    i.client = cl
-    if _, _, e := i.client.Ping(); e != nil {
-        return fmt.Errorf("failed to connect to %s\n", i.client.Addr())
-    }
-
-    // Validate args
-    if i.config.Path == "" {
-        return fmt.Errorf("file argument required")
-    }
-
-    defer func() {
-        if i.totalInserts > 0 {
-            log.Printf("Processed %d commands\n", i.totalCommands)
-            log.Printf("Processed %d inserts\n", i.totalInserts)
-            log.Printf("Failed %d inserts\n", i.failedInserts)
-        }
-    }()
-
-    // Open the file
-    f, err := os.Open(i.config.Path)
-    if err != nil {
-        return err
-    }
-    defer f.Close()
-
-    var r io.Reader
-
-    // If gzipped, wrap in a gzip reader
-    if i.config.Compressed {
-        gr, err := gzip.NewReader(f)
-        if err != nil {
-            return err
-        }
-        defer gr.Close()
-        // Set the reader to the gzip reader
-        r = gr
-    } else {
-        // Standard text file so our reader can just be the file
-        r = f
-    }
-
-    // Get our reader
-    scanner := bufio.NewScanner(r)
-
-    // Process the DDL
-    i.processDDL(scanner)
-
-    // Set up our throttle channel.
-    // Since there is effectively no other activity at this point,
-    // the smaller resolution gets us much closer to the requested PPS
-    i.throttle = time.NewTicker(time.Microsecond)
-    defer i.throttle.Stop()
-
-    // Prime the last write
-    i.lastWrite = time.Now()
-
-    // Process the DML
-    i.processDML(scanner)
-
-    // Check if we had any errors scanning the file
-    if err := scanner.Err(); err != nil {
-        return fmt.Errorf("reading standard input: %s", err)
-    }
-
-    return nil
-}
-
-func (i *Importer) processDDL(scanner *bufio.Scanner) {
-    for scanner.Scan() {
-        line := scanner.Text()
-        // If we find the DML token, we are done with DDL
-        if strings.HasPrefix(line, "# DML") {
-            return
-        }
-        if strings.HasPrefix(line, "#") {
-            continue
-        }
-        // Skip blank lines
-        if strings.TrimSpace(line) == "" {
-            continue
-        }
-        i.queryExecutor(line)
-    }
-}
-
-func (i *Importer) processDML(scanner *bufio.Scanner) {
-    start := time.Now()
-    for scanner.Scan() {
-        line := scanner.Text()
-        if strings.HasPrefix(line, "# CONTEXT-DATABASE:") {
-            i.database = strings.TrimSpace(strings.Split(line, ":")[1])
-        }
-        if strings.HasPrefix(line, "# CONTEXT-RETENTION-POLICY:") {
-            i.retentionPolicy = strings.TrimSpace(strings.Split(line, ":")[1])
-        }
-        if strings.HasPrefix(line, "#") {
-            continue
-        }
-        // Skip blank lines
-        if strings.TrimSpace(line) == "" {
-            continue
-        }
-        i.batchAccumulator(line, start)
-    }
-    // Call batchWrite one last time to flush anything out in the batch
-    i.batchWrite()
-}
-
-func (i *Importer) execute(command string) {
-    response, err := i.client.Query(client.Query{Command: command, Database: i.database})
-    if err != nil {
-        log.Printf("error: %s\n", err)
-        return
-    }
-    if err := response.Error(); err != nil {
-        log.Printf("error: %s\n", response.Error())
-    }
-}
-
-func (i *Importer) queryExecutor(command string) {
-    i.totalCommands++
-    i.execute(command)
-}
-
-func (i *Importer) batchAccumulator(line string, start time.Time) {
-    i.batch = append(i.batch, line)
-    if len(i.batch) == batchSize {
-        i.batchWrite()
-        i.batch = i.batch[:0]
-        // Give some status feedback every 100000 lines processed
-        processed := i.totalInserts + i.failedInserts
-        if processed%100000 == 0 {
-            since := time.Since(start)
-            pps := float64(processed) / since.Seconds()
-            log.Printf("Processed %d lines.  Time elapsed: %s.  Points per second (PPS): %d", processed, since.String(), int64(pps))
-        }
-    }
-}
-
-func (i *Importer) batchWrite() {
-    // Accumulate the batch size to see how many points we have written this second
-    i.throttlePointsWritten += len(i.batch)
-
-    // Find out when we last wrote data
-    since := time.Since(i.lastWrite)
-
-    // Check to see if we've exceeded our points per second for the current timeframe
-    var currentPPS int
-    if since.Seconds() > 0 {
-        currentPPS = int(float64(i.throttlePointsWritten) / since.Seconds())
-    } else {
-        currentPPS = i.throttlePointsWritten
-    }
-
-    // If our currentPPS is greater than the PPS specified, then we wait and retry
-    if currentPPS > i.config.PPS && i.config.PPS != 0 {
-        // Wait for the next tick
-        <-i.throttle.C
-
-        // Decrement the batch size back out as it is going to get called again
-        i.throttlePointsWritten -= len(i.batch)
-        i.batchWrite()
-        return
-    }
-
-    _, e := i.client.WriteLineProtocol(strings.Join(i.batch, "\n"), i.database, i.retentionPolicy, i.config.Precision, i.config.WriteConsistency)
-    if e != nil {
-        log.Println("error writing batch: ", e)
-        // Output failed lines to STDOUT so users can capture lines that failed to import
-        fmt.Println(strings.Join(i.batch, "\n"))
-        i.failedInserts += len(i.batch)
-    } else {
-        i.totalInserts += len(i.batch)
-    }
-    i.throttlePointsWritten = 0
-    i.lastWrite = time.Now()
-    return
-}
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/INFLUXQL.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/INFLUXQL.md
deleted file mode 100644
index 106b80238..000000000
--- a/Godeps/_workspace/src/github.com/influxdb/influxdb/influxql/INFLUXQL.md
+++ /dev/null
@@ -1,707 +0,0 @@
-# The Influx Query Language Specification
-
-## Introduction
-
-This is a reference for the Influx Query Language ("InfluxQL").
-
-InfluxQL is a SQL-like query language for interacting with InfluxDB. It has been lovingly crafted to feel familiar to those coming from other SQL or SQL-like environments while providing features specific to storing and analyzing time series data.
-
-## Notation
-
-The syntax is specified using Extended Backus-Naur Form ("EBNF"). EBNF is the same notation used in the [Go](http://golang.org) programming language specification, which can be found [here](https://golang.org/ref/spec). Not so coincidentally, InfluxDB is written in Go.
-
-```
-Production  = production_name "=" [ Expression ] "." .
-Expression  = Alternative { "|" Alternative } .
-Alternative = Term { Term } .
-Term        = production_name | token [ "…" token ] | Group | Option | Repetition .
-Group       = "(" Expression ")" .
-Option      = "[" Expression "]" .
-Repetition  = "{" Expression "}" .
-```
-
-Notation operators in order of increasing precedence:
-
-```
-|   alternation
-()  grouping
-[]  option (0 or 1 times)
-{}  repetition (0 to n times)
-```
-
-## Query representation
-
-### Characters
-
-InfluxQL is Unicode text encoded in [UTF-8](http://en.wikipedia.org/wiki/UTF-8).
-
-```
-newline      = /* the Unicode code point U+000A */ .
-unicode_char = /* an arbitrary Unicode code point except newline */ .
-```
-
-## Letters and digits
-
-Letters are the set of ASCII letters; the underscore character _ (U+005F) is also considered a letter.
-
-Only decimal digits are supported.
-
-```
-letter       = ascii_letter | "_" .
-ascii_letter = "A" … "Z" | "a" … "z" .
-digit        = "0" … "9" .
-```
-
-## Identifiers
-
-Identifiers are tokens which refer to database names, retention policy names, user names, measurement names, tag keys, and field keys.
-
-The rules:
-
-- double quoted identifiers can contain any unicode character other than a new line
-- double quoted identifiers can contain escaped `"` characters (i.e., `\"`)
-- unquoted identifiers must start with an upper or lowercase ASCII character or "_"
-- unquoted identifiers may contain only ASCII letters, decimal digits, and "_"
-
-```
-identifier          = unquoted_identifier | quoted_identifier .
-unquoted_identifier = ( letter ) { letter | digit } .
-quoted_identifier   = `"` unicode_char { unicode_char } `"` .
-```
-
-#### Examples:
-
-```
-cpu
-_cpu_stats
-"1h"
-"anything really"
-"1_Crazy-1337.identifier>NAME👍"
-```
-
-## Keywords
-
-```
-ALL        ALTER        ANY          AS          ASC           BEGIN
-BY         CREATE       CONTINUOUS   DATABASE    DATABASES     DEFAULT
-DELETE     DESC         DESTINATIONS DIAGNOSTICS DISTINCT      DROP
-DURATION   END          EXISTS       EXPLAIN     FIELD         FOR
-FORCE      FROM         GRANT        GRANTS      GROUP         IF
-IN         INF          INNER        INSERT      INTO          KEY
-KEYS       LIMIT        SHOW         MEASUREMENT MEASUREMENTS  NOT
-OFFSET     ON           ORDER        PASSWORD    POLICY        POLICIES
-PRIVILEGES QUERIES      QUERY        READ        REPLICATION   RETENTION
-REVOKE     SELECT       SERIES       SERVER      SERVERS       SET
-SHARDS     SLIMIT       SOFFSET      STATS       SUBSCRIPTION  SUBSCRIPTIONS
-TAG        TO           USER         USERS       VALUES        WHERE
-WITH       WRITE
-```
-
-## Literals
-
-### Integers
-
-InfluxQL supports decimal integer literals. Hexadecimal and octal literals are not currently supported.
-
-```
-int_lit = ( "1" … "9" ) { digit } .
-```
-
-### Floats
-
-InfluxQL supports floating-point literals. Exponents are not currently supported.
-
-```
-float_lit = int_lit "." int_lit .
-```
-
-### Strings
-
-String literals must be surrounded by single quotes. Strings may contain `'` characters as long as they are escaped (i.e., `\'`).
-
-```
-string_lit = `'` { unicode_char } `'` .
-```
-
-### Durations
-
-Duration literals specify a length of time. An integer literal followed immediately (with no spaces) by a duration unit listed below is interpreted as a duration literal.
-
-### Duration units
-
-| Units  | Meaning                                 |
-|--------|-----------------------------------------|
-| u or µ | microseconds (1 millionth of a second)  |
-| ms     | milliseconds (1 thousandth of a second) |
-| s      | second                                  |
-| m      | minute                                  |
-| h      | hour                                    |
-| d      | day                                     |
-| w      | week                                    |
-
-```
-duration_lit  = int_lit duration_unit .
-duration_unit = "u" | "µ" | "ms" | "s" | "m" | "h" | "d" | "w" .
-```
-
-### Dates & Times
-
-The date and time literal format is not specified in EBNF like the rest of this document. It is specified using Go's date / time parsing format, which is a reference date written in the format required by InfluxQL. The reference date time is:
-
-InfluxQL reference date time: January 2nd, 2006 at 3:04:05 PM
-
-```
-time_lit = "2006-01-02 15:04:05.999999" | "2006-01-02" .
-```
-
-### Booleans
-
-```
-bool_lit = TRUE | FALSE .
-```
-
-### Regular Expressions
-
-```
-regex_lit = "/" { unicode_char } "/" .
-```
-
-## Queries
-
-A query is composed of one or more statements separated by a semicolon.
-
-```
-query = statement { ";" statement } .
-
-statement = alter_retention_policy_stmt |
-            create_continuous_query_stmt |
-            create_database_stmt |
-            create_retention_policy_stmt |
-            create_user_stmt |
-            create_subscription_stmt |
-            delete_stmt |
-            drop_continuous_query_stmt |
-            drop_database_stmt |
-            drop_measurement_stmt |
-            drop_retention_policy_stmt |
-            drop_series_stmt |
-            drop_subscription_stmt |
-            drop_user_stmt |
-            grant_stmt |
-            show_continuous_queries_stmt |
-            show_databases_stmt |
-            show_field_keys_stmt |
-            show_measurements_stmt |
-            show_retention_policies_stmt |
-            show_series_stmt |
-            show_shards_stmt |
-            show_subscriptions_stmt |
-            show_tag_keys_stmt |
-            show_tag_values_stmt |
-            show_users_stmt |
-            revoke_stmt |
-            select_stmt .
-```
-
-## Statements
-
-### ALTER RETENTION POLICY
-
-```
-alter_retention_policy_stmt = "ALTER RETENTION POLICY" policy_name "ON"
-                              db_name retention_policy_option
-                              [ retention_policy_option ]
-                              [ retention_policy_option ] .
-
-db_name     = identifier .
-
-policy_name = identifier .
-
-retention_policy_option      = retention_policy_duration |
-                               retention_policy_replication |
-                               "DEFAULT" .
-
-retention_policy_duration    = "DURATION" duration_lit .
-retention_policy_replication = "REPLICATION" int_lit .
-```
-
-#### Examples:
-
-```sql
--- Set default retention policy for mydb to 1h.cpu.
-ALTER RETENTION POLICY "1h.cpu" ON mydb DEFAULT;
-
--- Change duration and replication factor.
-ALTER RETENTION POLICY policy1 ON somedb DURATION 1h REPLICATION 4;
-```
-
-### CREATE CONTINUOUS QUERY
-
-```
-create_continuous_query_stmt = "CREATE CONTINUOUS QUERY" query_name "ON" db_name
-                               "BEGIN" select_stmt "END" .
-
-query_name = identifier .
-```
-
-#### Examples:
-
-```sql
--- selects from the default retention policy and writes into the 6_months retention policy
-CREATE CONTINUOUS QUERY "10m_event_count"
-ON db_name
-BEGIN
-  SELECT count(value)
-  INTO "6_months".events
-  FROM events
-  GROUP BY time(10m)
-END;
-
--- this selects from the output of one continuous query in one retention policy and outputs to another series in another retention policy
-CREATE CONTINUOUS QUERY "1h_event_count"
-ON db_name
-BEGIN
-  SELECT sum(count) as count
-  INTO "2_years".events
-  FROM "6_months".events
-  GROUP BY time(1h)
-END;
-```
-
-### CREATE DATABASE
-
-```
-create_database_stmt = "CREATE DATABASE" db_name .
-```
-
-#### Example:
-
-```sql
-CREATE DATABASE foo;
-```
-
-### CREATE RETENTION POLICY
-
-```
-create_retention_policy_stmt = "CREATE RETENTION POLICY" policy_name "ON"
-                               db_name retention_policy_duration
-                               retention_policy_replication
-                               [ "DEFAULT" ] .
-```
-
-#### Examples
-
-```sql
--- Create a retention policy.
-CREATE RETENTION POLICY "10m.events" ON somedb DURATION 10m REPLICATION 2;
-
--- Create a retention policy and set it as the default.
-CREATE RETENTION POLICY "10m.events" ON somedb DURATION 10m REPLICATION 2 DEFAULT;
-```
-
-### CREATE SUBSCRIPTION
-
-```
-create_subscription_stmt = "CREATE SUBSCRIPTION" subscription_name "ON" db_name "." retention_policy "DESTINATIONS" ("ANY"|"ALL") host { "," host } .
-```
-
-#### Examples:
-
-```sql
--- Create a SUBSCRIPTION on database 'mydb' and retention policy 'default' that sends data to 'example.com:9090' via UDP.
-CREATE SUBSCRIPTION sub0 ON "mydb"."default" DESTINATIONS ALL 'udp://example.com:9090';
-
--- Create a SUBSCRIPTION on database 'mydb' and retention policy 'default' that round-robins the data to 'h1.example.com:9090' and 'h2.example.com:9090'.
-CREATE SUBSCRIPTION sub0 ON "mydb"."default" DESTINATIONS ANY 'udp://h1.example.com:9090', 'udp://h2.example.com:9090';
-```
-
-### CREATE USER
-
-```
-create_user_stmt = "CREATE USER" user_name "WITH PASSWORD" password
-                   [ "WITH ALL PRIVILEGES" ] .
-```
-
-#### Examples:
-
-```sql
--- Create a normal database user.
-CREATE USER jdoe WITH PASSWORD '1337password';
-
--- Create a cluster admin.
--- Note: Unlike the GRANT statement, the "PRIVILEGES" keyword is required here.
-CREATE USER jdoe WITH PASSWORD '1337password' WITH ALL PRIVILEGES;
-```
-
-### DELETE
-
-```
-delete_stmt = "DELETE" from_clause where_clause .
-```
-
-#### Example:
-
-```sql
--- delete data points from the cpu measurement where the region tag
--- equals 'uswest'
-DELETE FROM cpu WHERE region = 'uswest';
-```
-
-### DROP CONTINUOUS QUERY
-
-```
-drop_continuous_query_stmt = "DROP CONTINUOUS QUERY" query_name .
-```
-
-#### Example:
-
-```sql
-DROP CONTINUOUS QUERY myquery;
-```
-
-### DROP DATABASE
-
-```
-drop_database_stmt = "DROP DATABASE" db_name .
-```
-
-#### Example:
-
-```sql
-DROP DATABASE mydb;
-```
-
-### DROP MEASUREMENT
-
-```
-drop_measurement_stmt = "DROP MEASUREMENT" measurement .
-```
-
-#### Examples:
-
-```sql
--- drop the cpu measurement
-DROP MEASUREMENT cpu;
-```
-
-### DROP RETENTION POLICY
-
-```
-drop_retention_policy_stmt = "DROP RETENTION POLICY" policy_name "ON" db_name .
-```
-
-#### Example:
-
-```sql
--- drop the retention policy named 1h.cpu from mydb
-DROP RETENTION POLICY "1h.cpu" ON mydb;
-```
-
-### DROP SERIES
-
-```
-drop_series_stmt = "DROP SERIES" [ from_clause ] [ where_clause ] .
-```
-
-#### Example:
-
-```sql
--- drop all series from the cpu measurement where the region tag equals 'uswest'
-DROP SERIES FROM cpu WHERE region = 'uswest';
-```
-
-### DROP SUBSCRIPTION
-
-```
-drop_subscription_stmt = "DROP SUBSCRIPTION" subscription_name "ON" db_name "." retention_policy .
-```
-
-#### Example:
-
-```sql
-DROP SUBSCRIPTION sub0 ON "mydb"."default";
-```
-
-### DROP USER
-
-```
-drop_user_stmt = "DROP USER" user_name .
-```
-
-#### Example:
-
-```sql
-DROP USER jdoe;
-```
-
-### GRANT
-
-NOTE: Users can be granted privileges on databases that do not exist.
-
-```
-grant_stmt = "GRANT" privilege [ on_clause ] to_clause .
-```
-
-#### Examples:
-
-```sql
--- grant cluster admin privileges
-GRANT ALL TO jdoe;
-
--- grant read access to a database
-GRANT READ ON mydb TO jdoe;
-```
-
-### SHOW CONTINUOUS QUERIES
-
-```
-show_continuous_queries_stmt = "SHOW CONTINUOUS QUERIES" .
-```
-
-#### Example:
-
-```sql
--- show all continuous queries
-SHOW CONTINUOUS QUERIES;
-```
-
-### SHOW DATABASES
-
-```
-show_databases_stmt = "SHOW DATABASES" .
-```
-
-#### Example:
-
-```sql
--- show all databases
-SHOW DATABASES;
-```
-
-### SHOW FIELD KEYS
-
-```
-show_field_keys_stmt = "SHOW FIELD KEYS" [ from_clause ] .
-```
-
-#### Examples:
-
-```sql
--- show field keys from all measurements
-SHOW FIELD KEYS;
-
--- show field keys from the specified measurement
-SHOW FIELD KEYS FROM cpu;
-```
-
-### SHOW MEASUREMENTS
-
-```
-show_measurements_stmt = "SHOW MEASUREMENTS" [ where_clause ] [ group_by_clause ] [ limit_clause ]
-                         [ offset_clause ] .
-```
-
-#### Examples:
-
-```sql
--- show all measurements
-SHOW MEASUREMENTS;
-
--- show measurements where region tag = 'uswest' AND host tag = 'serverA'
-SHOW MEASUREMENTS WHERE region = 'uswest' AND host = 'serverA';
-```
-
-### SHOW RETENTION POLICIES
-
-```
-show_retention_policies_stmt = "SHOW RETENTION POLICIES ON" db_name .
-```
-
-#### Example:
-
-```sql
--- show all retention policies on a database
-SHOW RETENTION POLICIES ON mydb;
-```
-
-### SHOW SERIES
-
-```
-show_series_stmt = "SHOW SERIES" [ from_clause ] [ where_clause ] [ group_by_clause ]
-                   [ limit_clause ] [ offset_clause ] .
-```
-
-#### Example:
-
-```sql
--- show all series from the cpu measurement where the region tag equals 'uswest'
-SHOW SERIES FROM cpu WHERE region = 'uswest';
-```
-
-### SHOW SHARDS
-
-```
-show_shards_stmt = "SHOW SHARDS" .
-```
-
-#### Example:
-
-```sql
-SHOW SHARDS;
-```
-
-### SHOW SUBSCRIPTIONS
-
-```
-show_subscriptions_stmt = "SHOW SUBSCRIPTIONS" .
-```
-
-#### Example:
-
-```sql
-SHOW SUBSCRIPTIONS;
-```
-
-### SHOW TAG KEYS
-
-```
-show_tag_keys_stmt = "SHOW TAG KEYS" [ from_clause ] [ where_clause ] [ group_by_clause ]
-                     [ limit_clause ] [ offset_clause ] .
-```
-
-#### Examples:
-
-```sql
--- show all tag keys
-SHOW TAG KEYS;
-
--- show all tag keys from the cpu measurement
-SHOW TAG KEYS FROM cpu;
-
--- show all tag keys from the cpu measurement where the region key = 'uswest'
-SHOW TAG KEYS FROM cpu WHERE region = 'uswest';
-
--- show all tag keys where the host key = 'serverA'
-SHOW TAG KEYS WHERE host = 'serverA';
-```
-
-### SHOW TAG VALUES
-
-```
-show_tag_values_stmt = "SHOW TAG VALUES" [ from_clause ] with_tag_clause [ where_clause ]
-                       [ group_by_clause ] [ limit_clause ] [ offset_clause ] .
-```
-
-#### Examples:
-
-```sql
--- show all tag values across all measurements for the region tag
-SHOW TAG VALUES WITH TAG = 'region';
-
--- show tag values from the cpu measurement for the region tag
-SHOW TAG VALUES FROM cpu WITH TAG = 'region';
-
--- show tag values from the cpu measurement for the region & host tag keys where service = 'redis'
-SHOW TAG VALUES FROM cpu WITH TAG IN (region, host) WHERE service = 'redis';
-```
-
-### SHOW USERS
-
-```
-show_users_stmt = "SHOW USERS" .
-```
-
-#### Example:
-
-```sql
--- show all users
-SHOW USERS;
-```
-
-### REVOKE
-
-```
-revoke_stmt = "REVOKE" privilege [ "ON" db_name ] "FROM" user_name .
-```
-
-#### Examples:
-
-```sql
--- revoke cluster admin privileges from jdoe
-REVOKE ALL PRIVILEGES FROM jdoe;
-
--- revoke read privileges from jdoe on mydb
-REVOKE READ ON mydb FROM jdoe;
-```
-
-### SELECT
-
-```
-select_stmt = "SELECT" fields from_clause [ into_clause ] [ where_clause ]
-              [ group_by_clause ] [ order_by_clause ] [ limit_clause ]
-              [ offset_clause ] [ slimit_clause ] [ soffset_clause ] .
-```
-
-#### Examples:
-
-```sql
--- select the mean value from the cpu measurement where region = 'uswest', grouped by 10-minute intervals
-SELECT mean(value) FROM cpu WHERE region = 'uswest' GROUP BY time(10m) fill(0);
-```
-
-## Clauses
-
-```
-from_clause     = "FROM" measurements .
-
-group_by_clause = "GROUP BY" dimensions fill(