Godep update and dependency resolution

parent e10394ba3b
commit 10a20e208a
@@ -108,8 +108,8 @@
 		},
 		{
 			"ImportPath": "github.com/influxdb/influxdb",
-			"Comment": "v0.9.4-rc1-884-g9625953",
-			"Rev": "9625953d3e06bd41b18c9d05aa1feccf353e20c8"
+			"Comment": "v0.9.4-rc1-922-gb0e9f7e",
+			"Rev": "b0e9f7e844225b05abf9f4455229490f99348ac4"
 		},
 		{
 			"ImportPath": "github.com/lib/pq",

@@ -2,6 +2,7 @@
 
 ### Release Notes
 - Field names for the internal stats have been changed to be more inline with Go style.
+- 0.9.5 is reverting to Go 1.4.2 due to unresolved issues with Go 1.5.1.
 
 ### Features
 - [#4098](https://github.com/influxdb/influxdb/pull/4702): Support 'history' command at CLI
@@ -35,6 +36,7 @@
 - [#4721](https://github.com/influxdb/influxdb/pull/4721): Export tsdb.InterfaceValues
 - [#4681](https://github.com/influxdb/influxdb/pull/4681): Increase default buffer size for collectd and graphite listeners
 - [#4659](https://github.com/influxdb/influxdb/pull/4659): Support IF EXISTS for DROP DATABASE
+- [#4685](https://github.com/influxdb/influxdb/pull/4685): Automatically promote node to raft peer if drop server results in removing a raft peer.
 
 ### Bugfixes
 - [#4715](https://github.com/influxdb/influxdb/pull/4715): Fix panic during Raft-close. Fix [issue #4707](https://github.com/influxdb/influxdb/issues/4707). Thanks @oiooj
@@ -110,7 +112,8 @@
 - [#4651](https://github.com/influxdb/influxdb/issues/4651): Importer doesn't flush out last batch
 - [#4602](https://github.com/influxdb/influxdb/issues/4602): Fixes data race between PointsWriter and Subscriber services.
 - [#4691](https://github.com/influxdb/influxdb/issues/4691): Enable toml test `TestConfig_Encode`.
-- [#4684](https://github.com/influxdb/influxdb/pull/4684): Add Graphite and UDP section to default config. Thanks @nkatsaros
+- [#4283](https://github.com/influxdb/influxdb/pull/4283): Disable HintedHandoff if configuration is not set.
+- [#4703](https://github.com/influxdb/influxdb/pull/4703): Complete lint for cmd/influx. Thanks @pablolmiranda
 
 ## v0.9.4 [2015-09-14]
 

@@ -13,7 +13,7 @@ Remember the golden rule of bug reports: **The easier you make it for us to repr
 If you have never written a bug report before, or if you want to brush up on your bug reporting skills, we recommend reading [Simon Tatham's essay "How to Report Bugs Effectively."](http://www.chiark.greenend.org.uk/~sgtatham/bugs.html)
 
 Test cases should be in the form of `curl` commands. For example:
-```
+```bash
 # create database
 curl -G http://localhost:8086/query --data-urlencode "q=CREATE DATABASE mydb"
 
@@ -73,7 +73,7 @@ second to sign our CLA, which can be found
 
 Installing Go
 -------------
-InfluxDB requires Go 1.5 or greater.
+InfluxDB requires Go 1.4 or greater.
 
 At InfluxDB we find gvm, a Go version manager, useful for installing Go. For instructions
 on how to install it see [the gvm page on github](https://github.com/moovweb/gvm).
@@ -81,8 +81,8 @@ on how to install it see [the gvm page on github](https://github.com/moovweb/gvm
 After installing gvm you can install and set the default go version by
 running the following:
 
-    gvm install go1.5.1
-    gvm use go1.5.1 --default
+    gvm install go1.4.2
+    gvm use go1.4.2 --default
 
 Revision Control Systems
 -------------
@@ -96,9 +96,11 @@ Getting the source
 ------
 Setup the project structure and fetch the repo like so:
 
+```bash
 mkdir $HOME/gocodez
 export GOPATH=$HOME/gocodez
 go get github.com/influxdb/influxdb
+```
 
 You can add the line `export GOPATH=$HOME/gocodez` to your bash/zsh file to be set for every shell instead of having to manually run it everytime.
 
@@ -106,10 +108,12 @@ Cloning a fork
 -------------
 If you wish to work with fork of InfluxDB, your own fork for example, you must still follow the directory structure above. But instead of cloning the main repo, instead clone your fork. Follow the steps below to work with a fork:
 
+```bash
 export GOPATH=$HOME/gocodez
 mkdir -p $GOPATH/src/github.com/influxdb
 cd $GOPATH/src/github.com/influxdb
 git clone git@github.com:<username>/influxdb
+```
 
 Retaining the directory structure `$GOPATH/src/github.com/influxdb` is necessary so that Go imports work correctly.
 
@@ -117,10 +121,10 @@ Pre-commit checks
 -------------
 
 We have a pre-commit hook to make sure code is formatted properly and vetted before you commit any changes. We strongly recommend using the pre-commit hook to guard against accidentally committing unformatted code. To use the pre-commit hook, run the following:
-
+```bash
 cd $GOPATH/src/github.com/influxdb/influxdb
 cp .hooks/pre-commit .git/hooks/
-
+```
 In case the commit is rejected because it's not formatted you can run
 the following to format the code:
 
@@ -158,7 +162,7 @@ go install ./...
 To set the version and commit flags during the build pass the following to the build command:
 
 ```bash
--ldflags="-X main.version=$VERSION -X main.branch=$BRANCH -X main.commit=$COMMIT -X main.buildTime=$TIME"
+-ldflags="-X main.version $VERSION -X main.branch $BRANCH -X main.commit $COMMIT -X main.buildTime $TIME"
 ```
 
 where `$VERSION` is the version, `$BRANCH` is the branch, `$COMMIT` is the git commit hash, and `$TIME` is the build timestamp.
 
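A note on the flag syntax: Go 1.4's linker takes the space-separated form `-X name value`, while Go 1.5 introduced `-X name=value`, so this change tracks the revert to Go 1.4.2. For context, here is a minimal sketch of the main-package variables such flags populate; only `version` is confirmed by the cmd/influx/main.go diff further down, the others are inferred from the `-ldflags` arguments above:

```go
package main

// Populated at build time via the Go linker, e.g.
//   go build -ldflags="-X main.version $VERSION ..."   (Go 1.4 syntax)
// `version` appears in the main.go diff below; branch, commit and
// buildTime are assumptions taken from the -ldflags arguments shown above.
var (
	version   string
	branch    string
	commit    string
	buildTime string
)
```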
@@ -1,7 +1,7 @@
 FROM 32bit/ubuntu:14.04
 
 RUN apt-get update && apt-get install -y python-software-properties software-properties-common git
-RUN add-apt-repository ppa:evarlast/golang1.5
+RUN add-apt-repository ppa:evarlast/golang1.4
 RUN apt-get update && apt-get install -y -o Dpkg::Options::="--force-overwrite" golang-go
 
 ENV GOPATH=/root/go

@@ -5,7 +5,7 @@
 # build process for InfluxDB.
 
 BUILD_DIR=$HOME/influxdb-build
-GO_VERSION=go1.5.1
+GO_VERSION=go1.4.2
 PARALLELISM="-parallel 256"
 TIMEOUT="-timeout 480s"
 

@@ -3,7 +3,7 @@ machine:
     - docker
   pre:
     - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/master/binscripts/gvm-installer)
-    - source $HOME/.gvm/scripts/gvm; gvm install go1.5.1 --binary
+    - source $HOME/.gvm/scripts/gvm; gvm install go1.4.2 --binary
 
 dependencies:
   override:

@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"crypto/tls"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io/ioutil"
 	"net"
@@ -20,9 +21,10 @@ const (
 	UDPPayloadSize = 512
 )
 
-type Config struct {
-	// URL of the InfluxDB database
-	URL *url.URL
+type HTTPConfig struct {
+	// Addr should be of the form "http://host:port"
+	// or "http://[ipv6-host%zone]:port".
+	Addr string
 
 	// Username is the influxdb username, optional
 	Username string
@@ -42,7 +44,8 @@ type Config struct {
 }
 
 type UDPConfig struct {
-	// Addr should be of the form "host:port" or "[ipv6-host%zone]:port".
+	// Addr should be of the form "udp://host:port"
+	// or "udp://[ipv6-host%zone]:port".
 	Addr string
 
 	// PayloadSize is the maximum size of a UDP client message, optional
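As a usage sketch of this config (values mirror `ExampleClient_uDP` in the example_test.go diff further down; the import path is assumed, since the page does not show it):

```go
package main

import (
	"fmt"

	// Import path assumed; the diff shows the package but not its location.
	client "github.com/influxdb/influxdb/client/v2"
)

func main() {
	// Create a UDP client from a UDPConfig, as in ExampleClient_uDP below.
	config := client.UDPConfig{Addr: "localhost:8089"}
	c, err := client.NewUDPClient(config)
	if err != nil {
		fmt.Println("Error: ", err.Error())
	}
	defer c.Close()
}
```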
@@ -78,17 +81,27 @@ type Client interface {
 }
 
 // NewClient creates a client interface from the given config.
-func NewClient(conf Config) Client {
+func NewHTTPClient(conf HTTPConfig) (Client, error) {
 	if conf.UserAgent == "" {
 		conf.UserAgent = "InfluxDBClient"
 	}
 
+	u, err := url.Parse(conf.Addr)
+	if err != nil {
+		return nil, err
+	} else if u.Scheme != "http" && u.Scheme != "https" {
+		m := fmt.Sprintf("Unsupported protocol scheme: %s, your address"+
+			" must start with http:// or https://", u.Scheme)
+		return nil, errors.New(m)
+	}
+
 	tr := &http.Transport{
 		TLSClientConfig: &tls.Config{
 			InsecureSkipVerify: conf.InsecureSkipVerify,
 		},
 	}
 	return &client{
-		url:       conf.URL,
+		url:       u,
 		username:  conf.Username,
 		password:  conf.Password,
 		useragent: conf.UserAgent,
@@ -96,7 +109,7 @@ func NewClient(conf Config) Client {
 			Timeout:   conf.Timeout,
 			Transport: tr,
 		},
-	}
+	}, nil
 }
 
 // Close releases the client's resources.
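The net effect of the two hunks above: callers now hand the constructor a plain address string, and it parses and validates the URL itself, returning an error instead of accepting a pre-parsed `*url.URL`. A minimal sketch of the new call site (import path assumed; usage mirrors the example_test.go diff further down):

```go
package main

import (
	"fmt"

	// Import path assumed; the diff shows the package but not its location.
	client "github.com/influxdb/influxdb/client/v2"
)

func main() {
	// NewHTTPClient rejects schemes other than http/https, so a bad Addr
	// now surfaces here rather than on the first request.
	c, err := client.NewHTTPClient(client.HTTPConfig{
		Addr: "http://localhost:8086",
	})
	if err != nil {
		fmt.Println("Error creating InfluxDB Client: ", err.Error())
	}
	defer c.Close()
}
```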
@@ -178,18 +191,18 @@ type BatchPoints interface {
 }
 
 // NewBatchPoints returns a BatchPoints interface based on the given config.
-func NewBatchPoints(c BatchPointsConfig) (BatchPoints, error) {
-	if c.Precision == "" {
-		c.Precision = "ns"
+func NewBatchPoints(conf BatchPointsConfig) (BatchPoints, error) {
+	if conf.Precision == "" {
+		conf.Precision = "ns"
 	}
-	if _, err := time.ParseDuration("1" + c.Precision); err != nil {
+	if _, err := time.ParseDuration("1" + conf.Precision); err != nil {
 		return nil, err
 	}
 	bp := &batchpoints{
-		database:         c.Database,
-		precision:        c.Precision,
-		retentionPolicy:  c.RetentionPolicy,
-		writeConsistency: c.WriteConsistency,
+		database:         conf.Database,
+		precision:        conf.Precision,
+		retentionPolicy:  conf.RetentionPolicy,
+		writeConsistency: conf.WriteConsistency,
 	}
 	return bp, nil
 }
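The `c` to `conf` rename is purely cosmetic; behavior is unchanged. For context, a hedged sketch of calling the constructor (import path assumed; "mydb" and "s" are illustrative values, not taken from the diff):

```go
package example

import (
	"fmt"

	// Import path assumed; the diff shows the package but not its location.
	client "github.com/influxdb/influxdb/client/v2"
)

// newBatch sketches NewBatchPoints usage. Precision must parse as a
// duration unit and defaults to "ns" when left empty, per the code above.
func newBatch() (client.BatchPoints, error) {
	bp, err := client.NewBatchPoints(client.BatchPointsConfig{
		Database:  "mydb", // illustrative value
		Precision: "s",    // illustrative value
	})
	if err != nil {
		fmt.Println("Error: ", err.Error())
	}
	return bp, err
}
```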
@@ -392,6 +405,17 @@ type Query struct {
 	Precision string
 }
 
+// NewQuery returns a query object
+// database and precision strings can be empty strings if they are not needed
+// for the query.
+func NewQuery(command, database, precision string) Query {
+	return Query{
+		Command:   command,
+		Database:  database,
+		Precision: precision,
+	}
+}
+
 // Response represents a list of statement results.
 type Response struct {
 	Results []Result
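`NewQuery` replaces struct-literal construction of `Query`; per its comment, database and precision may be empty strings when not needed. A sketch of a call site (mirrors `ExampleClient_query` in the example_test.go diff further down; import path assumed):

```go
package example

import (
	"fmt"

	// Import path assumed; the diff shows the package but not its location.
	client "github.com/influxdb/influxdb/client/v2"
)

// countShapes shows the new helper in place of the old
// client.Query{Command: ..., Database: ..., Precision: ...} literal.
func countShapes(c client.Client) {
	q := client.NewQuery("SELECT count(value) FROM shapes", "square_holes", "ns")
	if response, err := c.Query(q); err == nil && response.Error() == nil {
		fmt.Println(response.Results)
	}
}
```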
@@ -4,7 +4,6 @@ import (
 	"encoding/json"
 	"net/http"
 	"net/http/httptest"
-	"net/url"
 	"reflect"
 	"strings"
 	"testing"
@@ -66,9 +65,8 @@ func TestClient_Query(t *testing.T) {
 	}))
 	defer ts.Close()
 
-	u, _ := url.Parse(ts.URL)
-	config := Config{URL: u}
-	c := NewClient(config)
+	config := HTTPConfig{Addr: ts.URL}
+	c, _ := NewHTTPClient(config)
 	defer c.Close()
 
 	query := Query{}
@@ -97,10 +95,8 @@ func TestClient_BasicAuth(t *testing.T) {
 	}))
 	defer ts.Close()
 
-	u, _ := url.Parse(ts.URL)
-	u.User = url.UserPassword("username", "password")
-	config := Config{URL: u, Username: "username", Password: "password"}
-	c := NewClient(config)
+	config := HTTPConfig{Addr: ts.URL, Username: "username", Password: "password"}
+	c, _ := NewHTTPClient(config)
 	defer c.Close()
 
 	query := Query{}
@@ -118,9 +114,8 @@ func TestClient_Write(t *testing.T) {
 	}))
 	defer ts.Close()
 
-	u, _ := url.Parse(ts.URL)
-	config := Config{URL: u}
-	c := NewClient(config)
+	config := HTTPConfig{Addr: ts.URL}
+	c, _ := NewHTTPClient(config)
 	defer c.Close()
 
 	bp, err := NewBatchPoints(BatchPointsConfig{})
@@ -167,9 +162,9 @@ func TestClient_UserAgent(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		u, _ := url.Parse(ts.URL)
-		config := Config{URL: u, UserAgent: test.userAgent}
-		c := NewClient(config)
+		config := HTTPConfig{Addr: ts.URL, UserAgent: test.userAgent}
+		c, _ := NewHTTPClient(config)
 		defer c.Close()
 
 		receivedUserAgent = ""

@@ -2,9 +2,7 @@ package client_test
 
 import (
 	"fmt"
-	"log"
 	"math/rand"
-	"net/url"
 	"os"
 	"time"
 
@@ -12,17 +10,17 @@ import (
 )
 
 // Create a new client
-func ExampleClient() client.Client {
-	u, _ := url.Parse("http://localhost:8086")
+func ExampleClient() {
 
 	// NOTE: this assumes you've setup a user and have setup shell env variables,
 	// namely INFLUX_USER/INFLUX_PWD. If not just omit Username/Password below.
-	client := client.NewClient(client.Config{
-		URL:      u,
+	_, err := client.NewHTTPClient(client.HTTPConfig{
+		Addr:     "http://localhost:8086",
 		Username: os.Getenv("INFLUX_USER"),
 		Password: os.Getenv("INFLUX_PWD"),
 	})
-	return client
+	if err != nil {
+		fmt.Println("Error creating InfluxDB Client: ", err.Error())
+	}
 }
 
 // Write a point using the UDP client
@@ -31,7 +29,7 @@ func ExampleClient_uDP() {
 	config := client.UDPConfig{Addr: "localhost:8089"}
 	c, err := client.NewUDPClient(config)
 	if err != nil {
-		panic(err.Error())
+		fmt.Println("Error: ", err.Error())
 	}
 	defer c.Close()
 
@@ -49,7 +47,7 @@ func ExampleClient_uDP() {
 	}
 	pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
 	if err != nil {
-		panic(err.Error())
+		fmt.Println("Error: ", err.Error())
 	}
 	bp.AddPoint(pt)
 
@@ -60,10 +58,12 @@ func ExampleClient_uDP() {
 // Write a point using the HTTP client
 func ExampleClient_write() {
 	// Make client
-	u, _ := url.Parse("http://localhost:8086")
-	c := client.NewClient(client.Config{
-		URL: u,
+	c, err := client.NewHTTPClient(client.HTTPConfig{
+		Addr: "http://localhost:8086",
 	})
+	if err != nil {
+		fmt.Println("Error creating InfluxDB Client: ", err.Error())
+	}
 	defer c.Close()
 
 	// Create a new point batch
@@ -81,7 +81,7 @@ func ExampleClient_write() {
 	}
 	pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
 	if err != nil {
-		panic(err.Error())
+		fmt.Println("Error: ", err.Error())
 	}
 	bp.AddPoint(pt)
 
@@ -106,7 +106,7 @@ func ExampleBatchPoints() {
 	}
 	pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
 	if err != nil {
-		panic(err.Error())
+		fmt.Println("Error: ", err.Error())
 	}
 	bp.AddPoint(pt)
 }
@@ -127,7 +127,7 @@ func ExampleBatchPoints_setters() {
 	}
 	pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
 	if err != nil {
-		panic(err.Error())
+		fmt.Println("Error: ", err.Error())
 	}
 	bp.AddPoint(pt)
 }
@@ -165,11 +165,13 @@ func ExampleClient_write1000() {
 	sampleSize := 1000
 
 	// Make client
-	u, _ := url.Parse("http://localhost:8086")
-	clnt := client.NewClient(client.Config{
-		URL: u,
+	c, err := client.NewHTTPClient(client.HTTPConfig{
+		Addr: "http://localhost:8086",
 	})
-	defer clnt.Close()
+	if err != nil {
+		fmt.Println("Error creating InfluxDB Client: ", err.Error())
+	}
+	defer c.Close()
 
 	rand.Seed(42)
 
@@ -205,44 +207,42 @@ func ExampleClient_write1000() {
 		bp.AddPoint(pt)
 	}
 
-	err := clnt.Write(bp)
+	err = c.Write(bp)
 	if err != nil {
-		log.Fatal(err)
+		fmt.Println("Error: ", err.Error())
 	}
 }
 
 // Make a Query
 func ExampleClient_query() {
 	// Make client
-	u, _ := url.Parse("http://localhost:8086")
-	c := client.NewClient(client.Config{
-		URL: u,
+	c, err := client.NewHTTPClient(client.HTTPConfig{
+		Addr: "http://localhost:8086",
 	})
+	if err != nil {
+		fmt.Println("Error creating InfluxDB Client: ", err.Error())
+	}
 	defer c.Close()
 
-	q := client.Query{
-		Command:   "SELECT count(value) FROM shapes",
-		Database:  "square_holes",
-		Precision: "ns",
-	}
+	q := client.NewQuery("SELECT count(value) FROM shapes", "square_holes", "ns")
 	if response, err := c.Query(q); err == nil && response.Error() == nil {
-		log.Println(response.Results)
+		fmt.Println(response.Results)
 	}
 }
 
 // Create a Database with a query
 func ExampleClient_createDatabase() {
 	// Make client
-	u, _ := url.Parse("http://localhost:8086")
-	c := client.NewClient(client.Config{
-		URL: u,
+	c, err := client.NewHTTPClient(client.HTTPConfig{
+		Addr: "http://localhost:8086",
 	})
+	if err != nil {
+		fmt.Println("Error creating InfluxDB Client: ", err.Error())
+	}
 	defer c.Close()
 
-	q := client.Query{
-		Command: "CREATE DATABASE telegraf",
-	}
+	q := client.NewQuery("CREATE DATABASE telegraf", "", "")
 	if response, err := c.Query(q); err == nil && response.Error() == nil {
-		log.Println(response.Results)
+		fmt.Println(response.Results)
 	}
 }

Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/cli/cli.go (757 lines, generated, vendored, new file)
@@ -0,0 +1,757 @@
+package cli
+
+import (
+	"encoding/csv"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/url"
+	"os"
+	"os/user"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"strings"
+	"text/tabwriter"
+
+	"github.com/influxdb/influxdb/client"
+	"github.com/influxdb/influxdb/cluster"
+	"github.com/influxdb/influxdb/importer/v8"
+	"github.com/peterh/liner"
+)
+
+const (
+	noTokenMsg = "Visit https://enterprise.influxdata.com to register for updates, InfluxDB server management, and monitoring.\n"
+)
+
+type CommandLine struct {
+	Client           *client.Client
+	Line             *liner.State
+	Host             string
+	Port             int
+	Username         string
+	Password         string
+	Database         string
+	Ssl              bool
+	RetentionPolicy  string
+	ClientVersion    string
+	ServerVersion    string
+	Pretty           bool   // controls pretty print for json
+	Format           string // controls the output format. Valid values are json, csv, or column
+	Precision        string
+	WriteConsistency string
+	Execute          string
+	ShowVersion      bool
+	Import           bool
+	PPS              int // Controls how many points per second the import will allow via throttling
+	Path             string
+	Compressed       bool
+}
+
+func New(version string) *CommandLine {
+	return &CommandLine{ClientVersion: version}
+}
+
+func (c *CommandLine) Run() {
+	var promptForPassword bool
+	// determine if they set the password flag but provided no value
+	for _, v := range os.Args {
+		v = strings.ToLower(v)
+		if (strings.HasPrefix(v, "-password") || strings.HasPrefix(v, "--password")) && c.Password == "" {
+			promptForPassword = true
+			break
+		}
+	}
+
+	c.Line = liner.NewLiner()
+	defer c.Line.Close()
+
+	if promptForPassword {
+		p, e := c.Line.PasswordPrompt("password: ")
+		if e != nil {
+			fmt.Println("Unable to parse password.")
+		} else {
+			c.Password = p
+		}
+	}
+
+	if err := c.Connect(""); err != nil {
+		fmt.Fprintf(os.Stderr,
+			"Failed to connect to %s\nPlease check your connection settings and ensure 'influxd' is running.\n",
+			c.Client.Addr())
+		return
+	}
+
+	if c.Execute == "" && !c.Import {
+		token, err := c.DatabaseToken()
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Failed to check token: %s\n", err.Error())
+			return
+		}
+		if token == "" {
+			fmt.Printf(noTokenMsg)
+		}
+		fmt.Printf("Connected to %s version %s\n", c.Client.Addr(), c.ServerVersion)
+	}
+
+	if c.Execute != "" {
+		// Modify precision before executing query
+		c.SetPrecision(c.Precision)
+		if err := c.ExecuteQuery(c.Execute); err != nil {
+			c.Line.Close()
+			os.Exit(1)
+		}
+		c.Line.Close()
+		os.Exit(0)
+	}
+
+	if c.Import {
+		path := net.JoinHostPort(c.Host, strconv.Itoa(c.Port))
+		u, e := client.ParseConnectionString(path, c.Ssl)
+		if e != nil {
+			fmt.Println(e)
+			return
+		}
+
+		config := v8.NewConfig()
+		config.Username = c.Username
+		config.Password = c.Password
+		config.Precision = "ns"
+		config.WriteConsistency = "any"
+		config.Path = c.Path
+		config.Version = c.ClientVersion
+		config.URL = u
+		config.Compressed = c.Compressed
+		config.PPS = c.PPS
+		config.Precision = c.Precision
+
+		i := v8.NewImporter(config)
+		if err := i.Import(); err != nil {
+			fmt.Printf("ERROR: %s\n", err)
+			c.Line.Close()
+			os.Exit(1)
+		}
+		c.Line.Close()
+		os.Exit(0)
+	}
+
+	c.Version()
+
+	var historyFile string
+	usr, err := user.Current()
+	// Only load history if we can get the user
+	if err == nil {
+		historyFile = filepath.Join(usr.HomeDir, ".influx_history")
+
+		if f, err := os.Open(historyFile); err == nil {
+			c.Line.ReadHistory(f)
+			f.Close()
+		}
+	}
+
+	for {
+		l, e := c.Line.Prompt("> ")
+		if e != nil {
+			break
+		}
+		if c.ParseCommand(l) {
+			// write out the history
+			if len(historyFile) > 0 {
+				c.Line.AppendHistory(l)
+				if f, err := os.Create(historyFile); err == nil {
+					c.Line.WriteHistory(f)
+					f.Close()
+				}
+			}
+		} else {
+			break // exit main loop
+		}
+	}
+}
+
+func (c *CommandLine) ParseCommand(cmd string) bool {
+	lcmd := strings.TrimSpace(strings.ToLower(cmd))
+
+	split := strings.Split(lcmd, " ")
+	var tokens []string
+	for _, token := range split {
+		if token != "" {
+			tokens = append(tokens, token)
+		}
+	}
+
+	if len(tokens) > 0 {
+		switch tokens[0] {
+		case "":
+			break
+		case "exit":
+			// signal the program to exit
+			return false
+		case "gopher":
+			c.gopher()
+		case "connect":
+			c.Connect(cmd)
+		case "auth":
+			c.SetAuth(cmd)
+		case "help":
+			c.help()
+		case "history":
+			c.history()
+		case "format":
+			c.SetFormat(cmd)
+		case "precision":
+			c.SetPrecision(cmd)
+		case "consistency":
+			c.SetWriteConsistency(cmd)
+		case "settings":
+			c.Settings()
+		case "pretty":
+			c.Pretty = !c.Pretty
+			if c.Pretty {
+				fmt.Println("Pretty print enabled")
+			} else {
+				fmt.Println("Pretty print disabled")
+			}
+		case "use":
+			c.use(cmd)
+		case "insert":
+			c.Insert(cmd)
+		default:
+			c.ExecuteQuery(cmd)
+		}
+	}
+	return true
+}
+
+// Connect connects client to a server
+func (c *CommandLine) Connect(cmd string) error {
+	var cl *client.Client
+	var u url.URL
+
+	// Remove the "connect" keyword if it exists
+	path := strings.TrimSpace(strings.Replace(cmd, "connect", "", -1))
+
+	// If they didn't provide a connection string, use the current settings
+	if path == "" {
+		path = net.JoinHostPort(c.Host, strconv.Itoa(c.Port))
+	}
+
+	var e error
+	u, e = client.ParseConnectionString(path, c.Ssl)
+	if e != nil {
+		return e
+	}
+
+	config := client.NewConfig()
+	config.URL = u
+	config.Username = c.Username
+	config.Password = c.Password
+	config.UserAgent = "InfluxDBShell/" + c.ClientVersion
+	config.Precision = c.Precision
+	cl, err := client.NewClient(config)
+	if err != nil {
+		return fmt.Errorf("Could not create client %s", err)
+	}
+	c.Client = cl
+	if _, v, e := c.Client.Ping(); e != nil {
+		return fmt.Errorf("Failed to connect to %s\n", c.Client.Addr())
+	} else {
+		c.ServerVersion = v
+	}
+
+	_, c.ServerVersion, _ = c.Client.Ping()
+
+	return nil
+}
+
+func (c *CommandLine) SetAuth(cmd string) {
+	// If they pass in the entire command, we should parse it
+	// auth <username> <password>
+	args := strings.Fields(cmd)
+	if len(args) == 3 {
+		args = args[1:]
+	} else {
+		args = []string{}
+	}
+
+	if len(args) == 2 {
+		c.Username = args[0]
+		c.Password = args[1]
+	} else {
+		u, e := c.Line.Prompt("username: ")
+		if e != nil {
+			fmt.Printf("Unable to process input: %s", e)
+			return
+		}
+		c.Username = strings.TrimSpace(u)
+		p, e := c.Line.PasswordPrompt("password: ")
+		if e != nil {
+			fmt.Printf("Unable to process input: %s", e)
+			return
+		}
+		c.Password = p
+	}
+
+	// Update the client as well
+	c.Client.SetAuth(c.Username, c.Password)
+}
+
+func (c *CommandLine) use(cmd string) {
+	args := strings.Split(strings.TrimSuffix(strings.TrimSpace(cmd), ";"), " ")
+	if len(args) != 2 {
+		fmt.Printf("Could not parse database name from %q.\n", cmd)
+		return
+	}
+	d := args[1]
+	c.Database = d
+	fmt.Printf("Using database %s\n", d)
+}
+
+func (c *CommandLine) SetPrecision(cmd string) {
+	// Remove the "precision" keyword if it exists
+	cmd = strings.TrimSpace(strings.Replace(cmd, "precision", "", -1))
+	// normalize cmd
+	cmd = strings.ToLower(cmd)
+
+	switch cmd {
+	case "h", "m", "s", "ms", "u", "ns":
+		c.Precision = cmd
+		c.Client.SetPrecision(c.Precision)
+	case "rfc3339":
+		c.Precision = ""
+		c.Client.SetPrecision(c.Precision)
+	default:
+		fmt.Printf("Unknown precision %q. Please use rfc3339, h, m, s, ms, u or ns.\n", cmd)
+	}
+}
+
+func (c *CommandLine) SetFormat(cmd string) {
+	// Remove the "format" keyword if it exists
+	cmd = strings.TrimSpace(strings.Replace(cmd, "format", "", -1))
+	// normalize cmd
+	cmd = strings.ToLower(cmd)
+
+	switch cmd {
+	case "json", "csv", "column":
+		c.Format = cmd
+	default:
+		fmt.Printf("Unknown format %q. Please use json, csv, or column.\n", cmd)
+	}
+}
+
+func (c *CommandLine) SetWriteConsistency(cmd string) {
+	// Remove the "consistency" keyword if it exists
+	cmd = strings.TrimSpace(strings.Replace(cmd, "consistency", "", -1))
+	// normalize cmd
+	cmd = strings.ToLower(cmd)
+
+	_, err := cluster.ParseConsistencyLevel(cmd)
+	if err != nil {
+		fmt.Printf("Unknown consistency level %q. Please use any, one, quorum, or all.\n", cmd)
+		return
+	}
+	c.WriteConsistency = cmd
+}
+
+// isWhitespace returns true if the rune is a space, tab, or newline.
+func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' }
+
+// isLetter returns true if the rune is a letter.
+func isLetter(ch rune) bool { return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') }
+
+// isDigit returns true if the rune is a digit.
+func isDigit(ch rune) bool { return (ch >= '0' && ch <= '9') }
+
+// isIdentFirstChar returns true if the rune can be used as the first char in an unquoted identifer.
+func isIdentFirstChar(ch rune) bool { return isLetter(ch) || ch == '_' }
+
+// isIdentChar returns true if the rune can be used in an unquoted identifier.
+func isNotIdentChar(ch rune) bool { return !(isLetter(ch) || isDigit(ch) || ch == '_') }
+
+func parseUnquotedIdentifier(stmt string) (string, string) {
+	if fields := strings.FieldsFunc(stmt, isNotIdentChar); len(fields) > 0 {
+		return fields[0], strings.TrimPrefix(stmt, fields[0])
+	}
+	return "", stmt
+}
+
+func parseDoubleQuotedIdentifier(stmt string) (string, string) {
+	escapeNext := false
+	fields := strings.FieldsFunc(stmt, func(ch rune) bool {
+		if ch == '\\' {
+			escapeNext = true
+		} else if ch == '"' {
+			if !escapeNext {
+				return true
+			}
+			escapeNext = false
+		}
+		return false
+	})
+	if len(fields) > 0 {
+		return fields[0], strings.TrimPrefix(stmt, "\""+fields[0]+"\"")
+	}
+	return "", stmt
+}
+
+func parseNextIdentifier(stmt string) (ident, remainder string) {
+	if len(stmt) > 0 {
+		switch {
+		case isWhitespace(rune(stmt[0])):
+			return parseNextIdentifier(stmt[1:])
+		case isIdentFirstChar(rune(stmt[0])):
+			return parseUnquotedIdentifier(stmt)
+		case stmt[0] == '"':
+			return parseDoubleQuotedIdentifier(stmt)
+		}
+	}
+	return "", stmt
+}
+
+func (c *CommandLine) parseInto(stmt string) string {
+	ident, stmt := parseNextIdentifier(stmt)
+	if strings.HasPrefix(stmt, ".") {
+		c.Database = ident
+		fmt.Printf("Using database %s\n", c.Database)
+		ident, stmt = parseNextIdentifier(stmt[1:])
+	}
+	if strings.HasPrefix(stmt, " ") {
+		c.RetentionPolicy = ident
+		fmt.Printf("Using retention policy %s\n", c.RetentionPolicy)
+		return stmt[1:]
+	}
+	return stmt
+}
+
+func (c *CommandLine) Insert(stmt string) error {
+	i, point := parseNextIdentifier(stmt)
+	if !strings.EqualFold(i, "insert") {
+		fmt.Printf("ERR: found %s, expected INSERT\n", i)
+		return nil
+	}
+	if i, r := parseNextIdentifier(point); strings.EqualFold(i, "into") {
+		point = c.parseInto(r)
+	}
+	_, err := c.Client.Write(client.BatchPoints{
+		Points: []client.Point{
+			client.Point{Raw: point},
+		},
+		Database:         c.Database,
+		RetentionPolicy:  c.RetentionPolicy,
+		Precision:        "n",
+		WriteConsistency: c.WriteConsistency,
+	})
+	if err != nil {
+		fmt.Printf("ERR: %s\n", err)
+		if c.Database == "" {
+			fmt.Println("Note: error may be due to not setting a database or retention policy.")
+			fmt.Println(`Please set a database with the command "use <database>" or`)
+			fmt.Println("INSERT INTO <database>.<retention-policy> <point>")
+		}
+		return err
+	}
+	return nil
+}
+
+func (c *CommandLine) ExecuteQuery(query string) error {
+	response, err := c.Client.Query(client.Query{Command: query, Database: c.Database})
+	if err != nil {
+		fmt.Printf("ERR: %s\n", err)
+		return err
+	}
+	c.FormatResponse(response, os.Stdout)
+	if err := response.Error(); err != nil {
+		fmt.Printf("ERR: %s\n", response.Error())
+		if c.Database == "" {
+			fmt.Println("Warning: It is possible this error is due to not setting a database.")
+			fmt.Println(`Please set a database with the command "use <database>".`)
+		}
+		return err
+	}
+	return nil
+}
+
+func (c *CommandLine) DatabaseToken() (string, error) {
+	response, err := c.Client.Query(client.Query{Command: "SHOW DIAGNOSTICS for 'registration'"})
+	if err != nil {
+		return "", err
+	}
+	if response.Error() != nil || len((*response).Results[0].Series) == 0 {
+		return "", nil
+	}
+
+	// Look for position of "token" column.
+	for i, s := range (*response).Results[0].Series[0].Columns {
+		if s == "token" {
+			return (*response).Results[0].Series[0].Values[0][i].(string), nil
+		}
+	}
+	return "", nil
+}
+
+func (c *CommandLine) FormatResponse(response *client.Response, w io.Writer) {
+	switch c.Format {
+	case "json":
+		c.writeJSON(response, w)
+	case "csv":
+		c.writeCSV(response, w)
+	case "column":
+		c.writeColumns(response, w)
+	default:
+		fmt.Fprintf(w, "Unknown output format %q.\n", c.Format)
+	}
+}
+
+func (c *CommandLine) writeJSON(response *client.Response, w io.Writer) {
+	var data []byte
+	var err error
+	if c.Pretty {
+		data, err = json.MarshalIndent(response, "", "    ")
+	} else {
+		data, err = json.Marshal(response)
+	}
+	if err != nil {
+		fmt.Fprintf(w, "Unable to parse json: %s\n", err)
+		return
+	}
+	fmt.Fprintln(w, string(data))
+}
+
+func (c *CommandLine) writeCSV(response *client.Response, w io.Writer) {
+	csvw := csv.NewWriter(w)
+	for _, result := range response.Results {
+		// Create a tabbed writer for each result as they won't always line up
+		rows := c.formatResults(result, "\t")
+		for _, r := range rows {
+			csvw.Write(strings.Split(r, "\t"))
+		}
+		csvw.Flush()
+	}
+}
+
+func (c *CommandLine) writeColumns(response *client.Response, w io.Writer) {
+	for _, result := range response.Results {
+		// Create a tabbed writer for each result a they won't always line up
+		w := new(tabwriter.Writer)
+		w.Init(os.Stdout, 0, 8, 1, '\t', 0)
+		csv := c.formatResults(result, "\t")
+		for _, r := range csv {
+			fmt.Fprintln(w, r)
+		}
+		w.Flush()
+	}
+}
+
+// formatResults will behave differently if you are formatting for columns or csv
+func (c *CommandLine) formatResults(result client.Result, separator string) []string {
+	rows := []string{}
+	// Create a tabbed writer for each result a they won't always line up
+	for i, row := range result.Series {
+		// gather tags
+		tags := []string{}
+		for k, v := range row.Tags {
+			tags = append(tags, fmt.Sprintf("%s=%s", k, v))
+			sort.Strings(tags)
+		}
+
+		columnNames := []string{}
+
+		// Only put name/tags in a column if format is csv
+		if c.Format == "csv" {
+			if len(tags) > 0 {
+				columnNames = append([]string{"tags"}, columnNames...)
+			}
+
+			if row.Name != "" {
+				columnNames = append([]string{"name"}, columnNames...)
+			}
+		}
+
+		for _, column := range row.Columns {
+			columnNames = append(columnNames, column)
+		}
+
+		// Output a line separator if we have more than one set or results and format is column
+		if i > 0 && c.Format == "column" {
+			rows = append(rows, "")
+		}
+
+		// If we are column format, we break out the name/tag to seperate lines
+		if c.Format == "column" {
+			if row.Name != "" {
+				n := fmt.Sprintf("name: %s", row.Name)
+				rows = append(rows, n)
+				if len(tags) == 0 {
+					l := strings.Repeat("-", len(n))
+					rows = append(rows, l)
+				}
+			}
+			if len(tags) > 0 {
+				t := fmt.Sprintf("tags: %s", (strings.Join(tags, ", ")))
+				rows = append(rows, t)
+			}
+		}
+
+		rows = append(rows, strings.Join(columnNames, separator))
+
+		// if format is column, break tags to their own line/format
+		if c.Format == "column" && len(tags) > 0 {
+			lines := []string{}
+			for _, columnName := range columnNames {
+				lines = append(lines, strings.Repeat("-", len(columnName)))
+			}
+			rows = append(rows, strings.Join(lines, separator))
+		}
+
+		for _, v := range row.Values {
+			var values []string
+			if c.Format == "csv" {
+				if row.Name != "" {
+					values = append(values, row.Name)
+				}
+				if len(tags) > 0 {
+					values = append(values, strings.Join(tags, ","))
+				}
+			}
+
+			for _, vv := range v {
+				values = append(values, interfaceToString(vv))
+			}
+			rows = append(rows, strings.Join(values, separator))
+		}
+		// Outout a line separator if in column format
+		if c.Format == "column" {
+			rows = append(rows, "")
+		}
+	}
+	return rows
+}
+
+func interfaceToString(v interface{}) string {
+	switch t := v.(type) {
+	case nil:
+		return ""
+	case bool:
+		return fmt.Sprintf("%v", v)
+	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr:
+		return fmt.Sprintf("%d", t)
+	case float32, float64:
+		return fmt.Sprintf("%v", t)
+	default:
+		return fmt.Sprintf("%v", t)
+	}
+}
+
+func (c *CommandLine) Settings() {
+	w := new(tabwriter.Writer)
+	w.Init(os.Stdout, 0, 8, 1, '\t', 0)
+	if c.Port > 0 {
+		fmt.Fprintf(w, "Host\t%s:%d\n", c.Host, c.Port)
+	} else {
+		fmt.Fprintf(w, "Host\t%s\n", c.Host)
+	}
+	fmt.Fprintf(w, "Username\t%s\n", c.Username)
+	fmt.Fprintf(w, "Database\t%s\n", c.Database)
+	fmt.Fprintf(w, "Pretty\t%v\n", c.Pretty)
+	fmt.Fprintf(w, "Format\t%s\n", c.Format)
+	fmt.Fprintf(w, "Write Consistency\t%s\n", c.WriteConsistency)
+	fmt.Fprintln(w)
+	w.Flush()
+}
+
+func (c *CommandLine) help() {
+	fmt.Println(`Usage:
+        connect <host:port>   connect to another node
+        auth                  prompt for username and password
+        pretty                toggle pretty print
+        use <db_name>         set current databases
+        format <format>       set the output format: json, csv, or column
+        precision <format>    set the timestamp format: h,m,s,ms,u,ns
+        consistency <level>   set write consistency level: any, one, quorum, or all
+        settings              output the current settings for the shell
+        exit                  quit the influx shell
+
+        show databases        show database names
+        show series           show series information
+        show measurements     show measurement information
+        show tag keys         show tag key information
+        show tag values       show tag value information
+
+        a full list of influxql commands can be found at:
+        https://influxdb.com/docs/v0.9/query_language/spec.html
+`)
+}
+
+func (c *CommandLine) history() {
+	usr, err := user.Current()
+	// Only load history if we can get the user
+	if err == nil {
+		historyFile := filepath.Join(usr.HomeDir, ".influx_history")
+		if history, err := ioutil.ReadFile(historyFile); err == nil {
+			fmt.Print(string(history))
+		}
+	}
+}
+
+func (c *CommandLine) gopher() {
+	fmt.Println(`
+ .-::-::://:-::- .:/++/'
+'://:-''/oo+//++o+/.://o- ./+:
+.:-. '++- .o/ '+yydhy' o-
+.:/. .h: :osoys .smMN- :/
+-/:.' s- /MMMymh. '/y/ s'
+-+s:'''' d -mMMms// '-/o:
+-/++/++/////:. o: '... s- :s.
+:+-+s-' ':/' 's- /+ 'o:
+'+-'o: /ydhsh. '//. '-o- o-
+.y. o: .MMMdm+y ':+++:::/+:.' s:
+.-h/ y- 'sdmds'h -+ydds:::-.' 'h.
+.//-.d' o: '.' 'dsNMMMNh:.:++' :y
++y. 'd 's. .s:mddds: ++ o/
+'N- odd 'o/. './o-s-' .---+++' o-
+'N' yNd .://:/:::::. -s -+/s/./s' 'o/'
+so' .h '''' ////s: '+. .s +y'
+os/-.y' 's' 'y::+ +d'
+'.:o/ -+:-:.' so.---.'
+o' 'd-.''/s'
+.s' :y.''.y
+-s mo:::'
+:: yh
+// '''' /M'
+o+ .s///:/. 'N:
+:+ /: -s' ho
+'s- -/s/:+/.+h' +h
+ys' ':' '-. -d
+oh .h
+/o .s
+s. .h
+-y .d
+m/ -h
++d /o
+'N- y:
+h: m.
+s- -d
+o- s+
++- 'm'
+s/ oo--.
+y- /s ':+'
+s' 'od--' .d:
+-+ ':o: ':+-/+
+y- .:+- '
+//o- '.:+/.
+.-:+/' ''-/+/.
+./:' ''.:o+/-'
+.+o:/:/+-' ''.-+ooo/-'
+o: -h///++////-.
+/: .o/
+//+ 'y
+./sooy.
+
+`)
+}
+
+func (c *CommandLine) Version() {
+	fmt.Println("InfluxDB shell " + c.ClientVersion)
+}

@@ -1,4 +1,4 @@
-package main_test
+package cli_test
 
 import (
 	"encoding/json"
@@ -8,19 +8,18 @@ import (
 	"testing"
 
 	"github.com/influxdb/influxdb/client"
-	main "github.com/influxdb/influxdb/cmd/influx"
+	"github.com/influxdb/influxdb/cmd/influx/cli"
 )
 
 func TestParseCommand_CommandsExist(t *testing.T) {
 	t.Parallel()
-	c := main.CommandLine{}
+	c := cli.CommandLine{}
 	tests := []struct {
 		cmd string
 	}{
 		{cmd: "gopher"},
 		{cmd: "connect"},
 		{cmd: "help"},
-		{cmd: "history"},
 		{cmd: "pretty"},
 		{cmd: "use"},
 		{cmd: ""}, // test that a blank command just returns
@@ -32,45 +31,9 @@ func TestParseCommand_CommandsExist(t *testing.T) {
 	}
 }
 
-func TestParseCommand_CommandsSamePrefix(t *testing.T) {
-	t.Parallel()
-	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		var data client.Response
-		w.WriteHeader(http.StatusNoContent)
-		_ = json.NewEncoder(w).Encode(data)
-	}))
-	defer ts.Close()
-
-	u, _ := url.Parse(ts.URL)
-	config := client.Config{URL: *u}
-	c, err := client.NewClient(config)
-	if err != nil {
-		t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
-	}
-	m := main.CommandLine{Client: c}
-
-	tests := []struct {
-		cmd string
-	}{
-		{cmd: "use db"},
-		{cmd: "user nodb"},
-		{cmd: "puse nodb"},
-		{cmd: ""}, // test that a blank command just returns
-	}
-	for _, test := range tests {
-		if !m.ParseCommand(test.cmd) {
-			t.Fatalf(`Command failed for %q.`, test.cmd)
-		}
-	}
-
-	if m.Database != "db" {
-		t.Fatalf(`Command "use" changed database to %q. Expected db`, m.Database)
-	}
-}
-
 func TestParseCommand_TogglePretty(t *testing.T) {
 	t.Parallel()
-	c := main.CommandLine{}
+	c := cli.CommandLine{}
 	if c.Pretty {
 		t.Fatalf(`Pretty should be false.`)
 	}
@@ -86,7 +49,7 @@ func TestParseCommand_TogglePretty(t *testing.T) {
 
 func TestParseCommand_Exit(t *testing.T) {
 	t.Parallel()
-	c := main.CommandLine{}
+	c := cli.CommandLine{}
 	tests := []struct {
 		cmd string
 	}{
@@ -105,7 +68,7 @@ func TestParseCommand_Exit(t *testing.T) {
 
 func TestParseCommand_Use(t *testing.T) {
 	t.Parallel()
-	c := main.CommandLine{}
+	c := cli.CommandLine{}
 	tests := []struct {
 		cmd string
 	}{
@@ -130,7 +93,7 @@ func TestParseCommand_Use(t *testing.T) {
 
 func TestParseCommand_Consistency(t *testing.T) {
 	t.Parallel()
-	c := main.CommandLine{}
+	c := cli.CommandLine{}
 	tests := []struct {
 		cmd string
 	}{
@@ -168,7 +131,7 @@ func TestParseCommand_Insert(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
 	}
-	m := main.CommandLine{Client: c}
+	m := cli.CommandLine{Client: c}
 
 	tests := []struct {
 		cmd string
@@ -205,7 +168,7 @@ func TestParseCommand_InsertInto(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
 	}
-	m := main.CommandLine{Client: c}
+	m := cli.CommandLine{Client: c}
 
 	tests := []struct {
 		cmd, db, rp string
@@ -257,7 +220,7 @@ func TestParseCommand_InsertInto(t *testing.T) {
 
 func TestParseCommand_History(t *testing.T) {
 	t.Parallel()
-	c := main.CommandLine{}
+	c := cli.CommandLine{}
 	tests := []struct {
 		cmd string
 	}{
@@ -1,31 +1,17 @@
 package main

 import (
-	"encoding/csv"
-	"encoding/json"
 	"flag"
 	"fmt"
-	"io"
-	"net"
-	"net/url"
 	"os"
-	"os/user"
-	"path/filepath"
-	"sort"
-	"strconv"
-	"strings"
-	"text/tabwriter"

 	"github.com/influxdb/influxdb/client"
-	"github.com/influxdb/influxdb/cluster"
-	"github.com/influxdb/influxdb/importer/v8"
-	"github.com/peterh/liner"
-	"io/ioutil"
+	"github.com/influxdb/influxdb/cmd/influx/cli"
 )

 // These variables are populated via the Go linker.
 var (
-	version string = "0.9"
+	version = "0.9"
 )

 const (
@@ -40,35 +26,8 @@ const (
 	defaultPPS = 0
 )

-const (
-	noTokenMsg = "Visit https://enterprise.influxdata.com to register for updates, InfluxDB server management, and monitoring.\n"
-)
-
-type CommandLine struct {
-	Client           *client.Client
-	Line             *liner.State
-	Host             string
-	Port             int
-	Username         string
-	Password         string
-	Database         string
-	Ssl              bool
-	RetentionPolicy  string
-	Version          string
-	Pretty           bool   // controls pretty print for json
-	Format           string // controls the output format. Valid values are json, csv, or column
-	Precision        string
-	WriteConsistency string
-	Execute          string
-	ShowVersion      bool
-	Import           bool
-	PPS              int // Controls how many points per second the import will allow via throttling
-	Path             string
-	Compressed       bool
-}
-
 func main() {
-	c := CommandLine{}
+	c := cli.New(version)

 	fs := flag.NewFlagSet("InfluxDB shell version "+version, flag.ExitOnError)
 	fs.StringVar(&c.Host, "host", client.DefaultHost, "Influxdb host to connect to.")
@@ -136,703 +95,9 @@ Examples:
 	fs.Parse(os.Args[1:])

 	if c.ShowVersion {
-		showVersion()
+		c.Version()
 		os.Exit(0)
 	}

-	var promptForPassword bool
+	c.Run()
-	// determine if they set the password flag but provided no value
-	for _, v := range os.Args {
-		v = strings.ToLower(v)
-		if (strings.HasPrefix(v, "-password") || strings.HasPrefix(v, "--password")) && c.Password == "" {
-			promptForPassword = true
-			break
-		}
-	}
-
-	c.Line = liner.NewLiner()
-	defer c.Line.Close()
-
-	if promptForPassword {
-		p, e := c.Line.PasswordPrompt("password: ")
-		if e != nil {
-			fmt.Println("Unable to parse password.")
-		} else {
-			c.Password = p
-		}
-	}
-
-	if err := c.connect(""); err != nil {
-		fmt.Fprintf(os.Stderr,
-			"Failed to connect to %s\nPlease check your connection settings and ensure 'influxd' is running.\n",
-			c.Client.Addr())
-		return
-	}
-
-	if c.Execute == "" && !c.Import {
-		token, err := c.DatabaseToken()
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "Failed to check token: %s\n", err.Error())
-			return
-		}
-		if token == "" {
-			fmt.Printf(noTokenMsg)
-		}
-		fmt.Printf("Connected to %s version %s\n", c.Client.Addr(), c.Version)
-	}
-
-	if c.Execute != "" {
-		// Modify precision before executing query
-		c.SetPrecision(c.Precision)
-		if err := c.ExecuteQuery(c.Execute); err != nil {
-			c.Line.Close()
-			os.Exit(1)
-		}
-		c.Line.Close()
-		os.Exit(0)
-	}
-
-	if c.Import {
-		path := net.JoinHostPort(c.Host, strconv.Itoa(c.Port))
-		u, e := client.ParseConnectionString(path, c.Ssl)
-		if e != nil {
-			fmt.Println(e)
-			return
-		}
-
-		config := v8.NewConfig()
-		config.Username = c.Username
-		config.Password = c.Password
-		config.Precision = "ns"
-		config.WriteConsistency = "any"
-		config.Path = c.Path
-		config.Version = version
-		config.URL = u
-		config.Compressed = c.Compressed
-		config.PPS = c.PPS
-		config.Precision = c.Precision
-
-		i := v8.NewImporter(config)
-		if err := i.Import(); err != nil {
-			fmt.Printf("ERROR: %s\n", err)
-			c.Line.Close()
-			os.Exit(1)
-		}
-		c.Line.Close()
-		os.Exit(0)
-	}
-
-	showVersion()
-
-	var historyFile string
-	usr, err := user.Current()
-	// Only load history if we can get the user
-	if err == nil {
-		historyFile = filepath.Join(usr.HomeDir, ".influx_history")
-
-		if f, err := os.Open(historyFile); err == nil {
-			c.Line.ReadHistory(f)
-			f.Close()
-		}
-	}
-
-	for {
-		l, e := c.Line.Prompt("> ")
-		if e != nil {
-			break
-		}
-		if c.ParseCommand(l) {
-			// write out the history
-			if len(historyFile) > 0 {
-				c.Line.AppendHistory(l)
-				if f, err := os.Create(historyFile); err == nil {
-					c.Line.WriteHistory(f)
-					f.Close()
-				}
-			}
-		} else {
-			break // exit main loop
-		}
-	}
-}
-
-func showVersion() {
-	fmt.Println("InfluxDB shell " + version)
-}
-
-func (c *CommandLine) ParseCommand(cmd string) bool {
-	lcmd := strings.TrimSpace(strings.ToLower(cmd))
-
-	split := strings.Split(lcmd, " ")
-	var tokens []string
-	for _, token := range split {
-		if token != "" {
-			tokens = append(tokens, token)
-		}
-	}
-
-	if len(tokens) > 0 {
-		switch tokens[0] {
-		case "":
-			break
-		case "exit":
-			// signal the program to exit
-			return false
-		case "gopher":
-			c.gopher()
-		case "connect":
-			c.connect(cmd)
-		case "auth":
-			c.SetAuth(cmd)
-		case "help":
-			c.help()
-		case "history":
-			c.history()
-		case "format":
-			c.SetFormat(cmd)
-		case "precision":
-			c.SetPrecision(cmd)
-		case "consistency":
-			c.SetWriteConsistency(cmd)
-		case "settings":
-			c.Settings()
-		case "pretty":
-			c.Pretty = !c.Pretty
-			if c.Pretty {
-				fmt.Println("Pretty print enabled")
-			} else {
-				fmt.Println("Pretty print disabled")
-			}
-		case "use":
-			c.use(cmd)
-		case "insert":
-			c.Insert(cmd)
-		default:
-			c.ExecuteQuery(cmd)
-		}
-	}
-	return true
-}
-
-func (c *CommandLine) connect(cmd string) error {
-	var cl *client.Client
-	var u url.URL
-
-	// Remove the "connect" keyword if it exists
-	path := strings.TrimSpace(strings.Replace(cmd, "connect", "", -1))
-
-	// If they didn't provide a connection string, use the current settings
-	if path == "" {
-		path = net.JoinHostPort(c.Host, strconv.Itoa(c.Port))
-	}
-
-	var e error
-	u, e = client.ParseConnectionString(path, c.Ssl)
-	if e != nil {
-		return e
-	}
-
-	config := client.NewConfig()
-	config.URL = u
-	config.Username = c.Username
-	config.Password = c.Password
-	config.UserAgent = "InfluxDBShell/" + version
-	config.Precision = c.Precision
-	cl, err := client.NewClient(config)
-	if err != nil {
-		return fmt.Errorf("Could not create client %s", err)
-	}
-	c.Client = cl
-	if _, v, e := c.Client.Ping(); e != nil {
-		return fmt.Errorf("Failed to connect to %s\n", c.Client.Addr())
-	} else {
-		c.Version = v
-	}
-	return nil
-}
-
-func (c *CommandLine) SetAuth(cmd string) {
-	// If they pass in the entire command, we should parse it
-	// auth <username> <password>
-	args := strings.Fields(cmd)
-	if len(args) == 3 {
-		args = args[1:]
-	} else {
-		args = []string{}
-	}
-
-	if len(args) == 2 {
-		c.Username = args[0]
-		c.Password = args[1]
-	} else {
-		u, e := c.Line.Prompt("username: ")
-		if e != nil {
-			fmt.Printf("Unable to process input: %s", e)
-			return
-		}
-		c.Username = strings.TrimSpace(u)
-		p, e := c.Line.PasswordPrompt("password: ")
-		if e != nil {
-			fmt.Printf("Unable to process input: %s", e)
-			return
-		}
-		c.Password = p
-	}
-
-	// Update the client as well
-	c.Client.SetAuth(c.Username, c.Password)
-}
-
-func (c *CommandLine) use(cmd string) {
-	args := strings.Split(strings.TrimSuffix(strings.TrimSpace(cmd), ";"), " ")
-	if len(args) != 2 {
-		fmt.Printf("Could not parse database name from %q.\n", cmd)
-		return
-	}
-	d := args[1]
-	c.Database = d
-	fmt.Printf("Using database %s\n", d)
-}
-
-func (c *CommandLine) SetPrecision(cmd string) {
-	// Remove the "precision" keyword if it exists
-	cmd = strings.TrimSpace(strings.Replace(cmd, "precision", "", -1))
-	// normalize cmd
-	cmd = strings.ToLower(cmd)
-
-	switch cmd {
-	case "h", "m", "s", "ms", "u", "ns":
-		c.Precision = cmd
-		c.Client.SetPrecision(c.Precision)
-	case "rfc3339":
-		c.Precision = ""
-		c.Client.SetPrecision(c.Precision)
-	default:
-		fmt.Printf("Unknown precision %q. Please use rfc3339, h, m, s, ms, u or ns.\n", cmd)
-	}
-}
-
-func (c *CommandLine) SetFormat(cmd string) {
-	// Remove the "format" keyword if it exists
-	cmd = strings.TrimSpace(strings.Replace(cmd, "format", "", -1))
-	// normalize cmd
-	cmd = strings.ToLower(cmd)
-
-	switch cmd {
-	case "json", "csv", "column":
-		c.Format = cmd
-	default:
-		fmt.Printf("Unknown format %q. Please use json, csv, or column.\n", cmd)
-	}
-}
-
-func (c *CommandLine) SetWriteConsistency(cmd string) {
-	// Remove the "consistency" keyword if it exists
-	cmd = strings.TrimSpace(strings.Replace(cmd, "consistency", "", -1))
-	// normalize cmd
-	cmd = strings.ToLower(cmd)
-
-	_, err := cluster.ParseConsistencyLevel(cmd)
-	if err != nil {
-		fmt.Printf("Unknown consistency level %q. Please use any, one, quorum, or all.\n", cmd)
-		return
-	}
-	c.WriteConsistency = cmd
-}
-
-// isWhitespace returns true if the rune is a space, tab, or newline.
-func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' }
-
-// isLetter returns true if the rune is a letter.
-func isLetter(ch rune) bool { return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') }
-
-// isDigit returns true if the rune is a digit.
-func isDigit(ch rune) bool { return (ch >= '0' && ch <= '9') }
-
-// isIdentFirstChar returns true if the rune can be used as the first char in an unquoted identifer.
-func isIdentFirstChar(ch rune) bool { return isLetter(ch) || ch == '_' }
-
-// isIdentChar returns true if the rune can be used in an unquoted identifier.
-func isNotIdentChar(ch rune) bool { return !(isLetter(ch) || isDigit(ch) || ch == '_') }
-
-func parseUnquotedIdentifier(stmt string) (string, string) {
-	if fields := strings.FieldsFunc(stmt, isNotIdentChar); len(fields) > 0 {
-		return fields[0], strings.TrimPrefix(stmt, fields[0])
-	}
-	return "", stmt
-}
-
-func parseDoubleQuotedIdentifier(stmt string) (string, string) {
-	escapeNext := false
-	fields := strings.FieldsFunc(stmt, func(ch rune) bool {
-		if ch == '\\' {
-			escapeNext = true
-		} else if ch == '"' {
-			if !escapeNext {
-				return true
-			}
-			escapeNext = false
-		}
-		return false
-	})
-	if len(fields) > 0 {
-		return fields[0], strings.TrimPrefix(stmt, "\""+fields[0]+"\"")
-	}
-	return "", stmt
-}
-
-func parseNextIdentifier(stmt string) (ident, remainder string) {
-	if len(stmt) > 0 {
-		switch {
-		case isWhitespace(rune(stmt[0])):
-			return parseNextIdentifier(stmt[1:])
-		case isIdentFirstChar(rune(stmt[0])):
-			return parseUnquotedIdentifier(stmt)
-		case stmt[0] == '"':
-			return parseDoubleQuotedIdentifier(stmt)
-		}
-	}
-	return "", stmt
-}
-
-func (c *CommandLine) parseInto(stmt string) string {
-	ident, stmt := parseNextIdentifier(stmt)
-	if strings.HasPrefix(stmt, ".") {
-		c.Database = ident
-		fmt.Printf("Using database %s\n", c.Database)
-		ident, stmt = parseNextIdentifier(stmt[1:])
-	}
-	if strings.HasPrefix(stmt, " ") {
-		c.RetentionPolicy = ident
-		fmt.Printf("Using retention policy %s\n", c.RetentionPolicy)
-		return stmt[1:]
-	}
-	return stmt
-}
-
-func (c *CommandLine) Insert(stmt string) error {
-	i, point := parseNextIdentifier(stmt)
-	if !strings.EqualFold(i, "insert") {
-		fmt.Printf("ERR: found %s, expected INSERT\n", i)
-		return nil
-	}
-	if i, r := parseNextIdentifier(point); strings.EqualFold(i, "into") {
-		point = c.parseInto(r)
-	}
-	_, err := c.Client.Write(client.BatchPoints{
-		Points: []client.Point{
-			client.Point{Raw: point},
-		},
-		Database:         c.Database,
-		RetentionPolicy:  c.RetentionPolicy,
-		Precision:        "n",
-		WriteConsistency: c.WriteConsistency,
-	})
-	if err != nil {
-		fmt.Printf("ERR: %s\n", err)
-		if c.Database == "" {
-			fmt.Println("Note: error may be due to not setting a database or retention policy.")
-			fmt.Println(`Please set a database with the command "use <database>" or`)
-			fmt.Println("INSERT INTO <database>.<retention-policy> <point>")
-		}
-		return err
-	}
-	return nil
-}
-
-func (c *CommandLine) ExecuteQuery(query string) error {
-	response, err := c.Client.Query(client.Query{Command: query, Database: c.Database})
-	if err != nil {
-		fmt.Printf("ERR: %s\n", err)
-		return err
-	}
-	c.FormatResponse(response, os.Stdout)
-	if err := response.Error(); err != nil {
-		fmt.Printf("ERR: %s\n", response.Error())
-		if c.Database == "" {
-			fmt.Println("Warning: It is possible this error is due to not setting a database.")
-			fmt.Println(`Please set a database with the command "use <database>".`)
-		}
-		return err
-	}
-	return nil
-}
-
-func (c *CommandLine) DatabaseToken() (string, error) {
-	response, err := c.Client.Query(client.Query{Command: "SHOW DIAGNOSTICS for 'registration'"})
-	if err != nil {
-		return "", err
-	}
-	if response.Error() != nil || len((*response).Results[0].Series) == 0 {
-		return "", nil
-	}
-
-	// Look for position of "token" column.
-	for i, s := range (*response).Results[0].Series[0].Columns {
-		if s == "token" {
-			return (*response).Results[0].Series[0].Values[0][i].(string), nil
-		}
-	}
-	return "", nil
-}
-
-func (c *CommandLine) FormatResponse(response *client.Response, w io.Writer) {
-	switch c.Format {
-	case "json":
-		c.writeJSON(response, w)
-	case "csv":
-		c.writeCSV(response, w)
-	case "column":
-		c.writeColumns(response, w)
-	default:
-		fmt.Fprintf(w, "Unknown output format %q.\n", c.Format)
-	}
-}
-
-func (c *CommandLine) writeJSON(response *client.Response, w io.Writer) {
-	var data []byte
-	var err error
-	if c.Pretty {
-		data, err = json.MarshalIndent(response, "", "    ")
-	} else {
-		data, err = json.Marshal(response)
-	}
-	if err != nil {
-		fmt.Fprintf(w, "Unable to parse json: %s\n", err)
-		return
-	}
-	fmt.Fprintln(w, string(data))
-}
-
-func (c *CommandLine) writeCSV(response *client.Response, w io.Writer) {
-	csvw := csv.NewWriter(w)
-	for _, result := range response.Results {
-		// Create a tabbed writer for each result as they won't always line up
-		rows := c.formatResults(result, "\t")
-		for _, r := range rows {
-			csvw.Write(strings.Split(r, "\t"))
-		}
-		csvw.Flush()
-	}
-}
-
-func (c *CommandLine) writeColumns(response *client.Response, w io.Writer) {
-	for _, result := range response.Results {
-		// Create a tabbed writer for each result a they won't always line up
-		w := new(tabwriter.Writer)
-		w.Init(os.Stdout, 0, 8, 1, '\t', 0)
-		csv := c.formatResults(result, "\t")
-		for _, r := range csv {
-			fmt.Fprintln(w, r)
-		}
-		w.Flush()
-	}
-}
-
-// formatResults will behave differently if you are formatting for columns or csv
-func (c *CommandLine) formatResults(result client.Result, separator string) []string {
-	rows := []string{}
-	// Create a tabbed writer for each result a they won't always line up
-	for i, row := range result.Series {
-		// gather tags
-		tags := []string{}
-		for k, v := range row.Tags {
-			tags = append(tags, fmt.Sprintf("%s=%s", k, v))
-			sort.Strings(tags)
-		}
-
-		columnNames := []string{}
-
-		// Only put name/tags in a column if format is csv
-		if c.Format == "csv" {
-			if len(tags) > 0 {
-				columnNames = append([]string{"tags"}, columnNames...)
-			}
-
-			if row.Name != "" {
-				columnNames = append([]string{"name"}, columnNames...)
-			}
-		}
-
-		for _, column := range row.Columns {
-			columnNames = append(columnNames, column)
-		}
-
-		// Output a line separator if we have more than one set or results and format is column
-		if i > 0 && c.Format == "column" {
-			rows = append(rows, "")
-		}
-
-		// If we are column format, we break out the name/tag to seperate lines
-		if c.Format == "column" {
-			if row.Name != "" {
-				n := fmt.Sprintf("name: %s", row.Name)
-				rows = append(rows, n)
-				if len(tags) == 0 {
-					l := strings.Repeat("-", len(n))
-					rows = append(rows, l)
-				}
-			}
-			if len(tags) > 0 {
-				t := fmt.Sprintf("tags: %s", (strings.Join(tags, ", ")))
-				rows = append(rows, t)
-			}
-		}
-
-		rows = append(rows, strings.Join(columnNames, separator))
-
-		// if format is column, break tags to their own line/format
-		if c.Format == "column" && len(tags) > 0 {
-			lines := []string{}
-			for _, columnName := range columnNames {
-				lines = append(lines, strings.Repeat("-", len(columnName)))
-			}
-			rows = append(rows, strings.Join(lines, separator))
-		}
-
-		for _, v := range row.Values {
-			var values []string
-			if c.Format == "csv" {
-				if row.Name != "" {
-					values = append(values, row.Name)
-				}
-				if len(tags) > 0 {
-					values = append(values, strings.Join(tags, ","))
-				}
-			}
-
-			for _, vv := range v {
-				values = append(values, interfaceToString(vv))
-			}
-			rows = append(rows, strings.Join(values, separator))
-		}
-		// Outout a line separator if in column format
-		if c.Format == "column" {
-			rows = append(rows, "")
-		}
-	}
-	return rows
-}
-
-func interfaceToString(v interface{}) string {
-	switch t := v.(type) {
-	case nil:
-		return ""
-	case bool:
-		return fmt.Sprintf("%v", v)
-	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr:
-		return fmt.Sprintf("%d", t)
-	case float32, float64:
-		return fmt.Sprintf("%v", t)
-	default:
-		return fmt.Sprintf("%v", t)
-	}
-}
-
-func (c *CommandLine) Settings() {
-	w := new(tabwriter.Writer)
-	w.Init(os.Stdout, 0, 8, 1, '\t', 0)
-	if c.Port > 0 {
-		fmt.Fprintf(w, "Host\t%s:%d\n", c.Host, c.Port)
-	} else {
-		fmt.Fprintf(w, "Host\t%s\n", c.Host)
-	}
-	fmt.Fprintf(w, "Username\t%s\n", c.Username)
-	fmt.Fprintf(w, "Database\t%s\n", c.Database)
-	fmt.Fprintf(w, "Pretty\t%v\n", c.Pretty)
-	fmt.Fprintf(w, "Format\t%s\n", c.Format)
-	fmt.Fprintf(w, "Write Consistency\t%s\n", c.WriteConsistency)
-	fmt.Fprintln(w)
-	w.Flush()
-}
-
-func (c *CommandLine) help() {
-	fmt.Println(`Usage:
-        connect <host:port>   connect to another node
-        auth                  prompt for username and password
-        pretty                toggle pretty print
-        use <db_name>         set current databases
-        format <format>       set the output format: json, csv, or column
-        precision <format>    set the timestamp format: h,m,s,ms,u,ns
-        consistency <level>   set write consistency level: any, one, quorum, or all
-        settings              output the current settings for the shell
-        exit                  quit the influx shell
-
-        show databases        show database names
-        show series           show series information
-        show measurements     show measurement information
-        show tag keys         show tag key information
-        show tag values       show tag value information
-
-        a full list of influxql commands can be found at:
-        https://influxdb.com/docs/v0.9/query_language/spec.html
-`)
-}
-
-func (c *CommandLine) history() {
-	usr, err := user.Current()
-	// Only load history if we can get the user
-	if err == nil {
-		historyFile := filepath.Join(usr.HomeDir, ".influx_history")
-		if history, err := ioutil.ReadFile(historyFile); err == nil {
-			fmt.Print(string(history))
-		}
-	}
-}
-
-func (c *CommandLine) gopher() {
-	fmt.Println(`
-          .-::-::://:-::-    .:/++/'
-     '://:-''/oo+//++o+/.://o-    ./+:
-  .:-.    '++-         .o/ '+yydhy'  o-
- .:/.      .h:         :osoys  .smMN-  :/
--/:.'      s-         /MMMymh.   '/y/  s'
--+s:''''  d           -mMMms//     '-/o:
--/++/++/////:.        o:    '... s-        :s.
-:+-+s-'       ':/'    's-       /+          'o:
-'+-'o:        /ydhsh.  '//.        '-o-      o-
-.y. o:        .MMMdm+y    ':+++:::/+:.'       s:
- .-h/  y-     'sdmds'h -+ydds:::-.'         'h.
-  .//-.d'  o:   '.'  'dsNMMMNh:.:++'          :y
-  +y.  'd   's.   .s:mddds:     ++            o/
-  'N-  odd    'o/.   './o-s-'   .---+++'       o-
-  'N'  yNd      .://:/:::::.  -s   -+/s/./s'  'o/'
-   so'  .h        ''''        ////s: '+. .s     +y'
-    os/-.y'                       's' 'y::+    +d'
-      '.:o/                        -+:-:.'    so.---.'
-          o'                                  'd-.''/s'
-          .s'                                  :y.''.y
-           -s                                   mo:::'
-            ::                                  yh
-             //                            ''''  /M'
-              o+             .s///:/.      'N:
-               :+             /:    -s'     ho
-                's-        -/s/:+/.+h'      +h
-                  ys'      ':'     '-.      -d
-                   oh                       .h
-                    /o                       .s
-                     s.                       .h
-                     -y                        .d
-                      m/                       -h
-                      +d                        /o
-                      'N-                        y:
-                       h:                        m.
-                       s-                       -d
-                       o-                       s+
-                       +-                      'm'
-                       s/                      oo--.
-                       y-                     /s  ':+'
-                       s'                   'od--' .d:
-                       -+                   ':o: ':+-/+
-                        y-                    .:+-      '
-                       //o-                    '.:+/.
-                       .-:+/'                     ''-/+/.
-                         ./:'                          ''.:o+/-'
-                          .+o:/:/+-'                      ''.-+ooo/-'
-                          o:   -h///++////-.
-                          /:   .o/
-                          //+  'y
-                          ./sooy.
-
-`)
 }
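With the interactive shell moved into the cmd/influx/cli package, the vendored entrypoint above shrinks to flag parsing plus a handoff. A minimal sketch of the resulting flow, using only the New, Version, and Run calls visible in this diff (the elided flags are an assumption about the surrounding file):

    package main

    import (
    	"flag"
    	"os"

    	"github.com/influxdb/influxdb/client"
    	"github.com/influxdb/influxdb/cmd/influx/cli"
    )

    var version = "0.9" // populated via the Go linker

    func main() {
    	c := cli.New(version)
    	fs := flag.NewFlagSet("InfluxDB shell version "+version, flag.ExitOnError)
    	fs.StringVar(&c.Host, "host", client.DefaultHost, "Influxdb host to connect to.")
    	// ... the remaining flags bind to fields on c the same way ...
    	fs.Parse(os.Args[1:])

    	if c.ShowVersion {
    		c.Version()
    		os.Exit(0)
    	}

    	// Prompting, connecting, importing, and the REPL loop now live in cli.
    	c.Run()
    }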
@@ -101,6 +101,7 @@ func u64tob(v uint64) []byte {
 	return b
 }

+// ShardIDs is a collection of UINT 64 that represent shard ids.
 type ShardIDs []uint64

 func (a ShardIDs) Len() int      { return len(a) }
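Only Len is visible in this hunk, but the new comment marks ShardIDs as the usual sort.Interface pattern. A small usage sketch, assuming the Swap and Less methods are defined alongside it in the tsdb package:

    import (
    	"sort"

    	"github.com/influxdb/influxdb/tsdb"
    )

    // sortedShardIDs orders shard ids ascending before iterating them.
    func sortedShardIDs(ids []uint64) []uint64 {
    	sort.Sort(tsdb.ShardIDs(ids))
    	return ids
    }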
@@ -49,7 +49,7 @@ func (b *blockStats) inc(typ int, enc byte) {
 	for len(b.counts[typ]) <= int(enc) {
 		b.counts[typ] = append(b.counts[typ], 0)
 	}
-	b.counts[typ][enc] += 1
+	b.counts[typ][enc]++
 }

 func (b *blockStats) size(sz int) {
@@ -300,7 +300,7 @@ func cmdDumpTsm1(opts *tsdmDumpOpts) {

 		// Possible corruption? Try to read as much as we can and point to the problem.
 		if key == "" {
-			errors = append(errors, fmt.Errorf("index pos %d, field id: %d, missing key for id.", i, block.id))
+			errors = append(errors, fmt.Errorf("index pos %d, field id: %d, missing key for id", i, block.id))
 		} else if len(split) < 2 {
 			errors = append(errors, fmt.Errorf("index pos %d, field id: %d, key corrupt: got '%v'", i, block.id, key))
 		} else {
@@ -382,7 +382,7 @@ func cmdDumpTsm1(opts *tsdmDumpOpts) {

 		if opts.filterKey != "" && !strings.Contains(invIds[id], opts.filterKey) {
 			i += (12 + int64(length))
-			blockCount += 1
+			blockCount++
 			continue
 		}

@@ -399,7 +399,7 @@ func cmdDumpTsm1(opts *tsdmDumpOpts) {
 		}, "\t"))

 		i += (12 + int64(length))
-		blockCount += 1
+		blockCount++
 	}
 	if opts.dumpBlocks {
 		println("Blocks:")
@@ -20,7 +20,7 @@ import (

 // These variables are populated via the Go linker.
 var (
-	version string = "0.9"
+	version   = "0.9"
 	commit    string
 	branch    string
 	buildTime string
@@ -169,7 +169,7 @@ func ParseCommandName(args []string) (string, []string) {
 	return "", args
 }

-// Command represents the command executed by "influxd version".
+// VersionCommand represents the command executed by "influxd version".
 type VersionCommand struct {
 	Stdout io.Writer
 	Stderr io.Writer
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore.go (generated, vendored)
@@ -40,6 +40,7 @@ func (cmd *Command) Run(args ...string) error {
 	return cmd.Restore(config, path)
 }

+// Restore restores a database snapshot
 func (cmd *Command) Restore(config *Config, path string) error {
 	// Remove meta and data directories.
 	if err := os.RemoveAll(config.Meta.Dir); err != nil {
@@ -72,6 +72,9 @@ func (cmd *Command) Run(args ...string) error {
 		cmd.Version, cmd.Branch, cmd.Commit, cmd.BuildTime)
 	log.Printf("Go version %s, GOMAXPROCS set to %d", runtime.Version(), runtime.GOMAXPROCS(0))

+	// Set parallelism.
+	runtime.GOMAXPROCS(runtime.NumCPU())
+
 	// Write the PID file.
 	if err := cmd.writePIDFile(options.PIDFile); err != nil {
 		return fmt.Errorf("write pid file: %s", err)
@@ -102,7 +105,7 @@ func (cmd *Command) Run(args ...string) error {

 	// Validate the configuration.
 	if err := config.Validate(); err != nil {
-		return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`.", err)
+		return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`", err)
 	}

 	// Create server from config and start it.
@@ -69,10 +69,8 @@ func NewConfig() *Config {
 	c.Monitor = monitor.NewConfig()
 	c.Subscriber = subscriber.NewConfig()
 	c.HTTPD = httpd.NewConfig()
-	c.Graphites = []graphite.Config{graphite.NewConfig()}
 	c.Collectd = collectd.NewConfig()
 	c.OpenTSDB = opentsdb.NewConfig()
-	c.UDPs = []udp.Config{udp.NewConfig()}

 	c.ContinuousQuery = continuous_querier.NewConfig()
 	c.Retention = retention.NewConfig()
@@ -101,6 +99,7 @@ func NewDemoConfig() (*Config, error) {
 	c.HintedHandoff.Dir = filepath.Join(homeDir, ".influxdb/hh")
 	c.Data.WALDir = filepath.Join(homeDir, ".influxdb/wal")

+	c.HintedHandoff.Enabled = true
 	c.Admin.Enabled = true

 	return c, nil
@@ -110,7 +109,7 @@ func NewDemoConfig() (*Config, error) {
 func (c *Config) Validate() error {
 	if c.Meta.Dir == "" {
 		return errors.New("Meta.Dir must be specified")
-	} else if c.HintedHandoff.Dir == "" {
+	} else if c.HintedHandoff.Enabled && c.HintedHandoff.Dir == "" {
 		return errors.New("HintedHandoff.Dir must be specified")
 	}

@@ -126,6 +125,7 @@ func (c *Config) Validate() error {
 	return nil
 }

+// ApplyEnvOverrides apply the environment configuration on top of the config.
 func (c *Config) ApplyEnvOverrides() error {
 	return c.applyEnvOverrides("INFLUXDB", reflect.ValueOf(c))
 }
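Taken together, the two hunks above mean HintedHandoff.Dir is only required while the service is enabled. A minimal sketch of the effect, assuming NewDemoConfig fills in the other required directories as shown:

    cfg, err := run.NewDemoConfig()
    if err != nil {
    	log.Fatal(err)
    }
    cfg.HintedHandoff.Enabled = false
    cfg.HintedHandoff.Dir = "" // no longer a validation error once disabled
    if err := cfg.Validate(); err != nil {
    	log.Fatal(err) // previously: "HintedHandoff.Dir must be specified"
    }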
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_command.go (generated, vendored)
@@ -54,7 +54,7 @@ func (cmd *PrintConfigCommand) Run(args ...string) error {

 	// Validate the configuration.
 	if err := config.Validate(); err != nil {
-		return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`.", err)
+		return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`", err)
 	}

 	toml.NewEncoder(cmd.Stdout).Encode(config)
@@ -30,6 +30,7 @@ import (
 	"github.com/influxdb/influxdb/services/udp"
 	"github.com/influxdb/influxdb/tcp"
 	"github.com/influxdb/influxdb/tsdb"
+	// Initialize the engine packages
 	_ "github.com/influxdb/influxdb/tsdb/engine"
 )

@@ -33,6 +33,14 @@ reporting-disabled = false
 heartbeat-timeout = "1s"
 leader-lease-timeout = "500ms"
 commit-timeout = "50ms"
+cluster-tracing = false
+
+# If enabled, when a Raft cluster loses a peer due to a `DROP SERVER` command,
+# the leader will automatically ask a non-raft peer node to promote to a raft
+# peer. This only happens if there is a non-raft peer node available to promote.
+# This setting only affects the local node, so to ensure it operates correctly, be sure to set
+# it in the config of every node.
+raft-promotion-enabled = true

 ###
 ### [data]
@@ -24,6 +24,9 @@ const (

 	// DefaultCommitTimeout is the default commit timeout for the store.
 	DefaultCommitTimeout = 50 * time.Millisecond
+
+	// DefaultRaftPromotionEnabled is the default for auto promoting a node to a raft node when needed
+	DefaultRaftPromotionEnabled = true
 )

 // Config represents the meta configuration.
@@ -38,8 +41,10 @@ type Config struct {
 	LeaderLeaseTimeout   toml.Duration `toml:"leader-lease-timeout"`
 	CommitTimeout        toml.Duration `toml:"commit-timeout"`
 	ClusterTracing       bool          `toml:"cluster-tracing"`
+	RaftPromotionEnabled bool          `toml:"raft-promotion-enabled"`
 }

+// NewConfig builds a new configuration with default values.
 func NewConfig() *Config {
 	return &Config{
 		Hostname:           DefaultHostname,
@@ -49,5 +54,6 @@ func NewConfig() *Config {
 		HeartbeatTimeout:     toml.Duration(DefaultHeartbeatTimeout),
 		LeaderLeaseTimeout:   toml.Duration(DefaultLeaderLeaseTimeout),
 		CommitTimeout:        toml.Duration(DefaultCommitTimeout),
+		RaftPromotionEnabled: DefaultRaftPromotionEnabled,
 	}
 }
@ -17,6 +17,7 @@ election-timeout = "10s"
|
||||||
heartbeat-timeout = "20s"
|
heartbeat-timeout = "20s"
|
||||||
leader-lease-timeout = "30h"
|
leader-lease-timeout = "30h"
|
||||||
commit-timeout = "40m"
|
commit-timeout = "40m"
|
||||||
|
raft-promotion-enabled = false
|
||||||
`, &c); err != nil {
|
`, &c); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
@ -32,5 +33,7 @@ commit-timeout = "40m"
|
||||||
t.Fatalf("unexpected leader lease timeout: %v", c.LeaderLeaseTimeout)
|
t.Fatalf("unexpected leader lease timeout: %v", c.LeaderLeaseTimeout)
|
||||||
} else if time.Duration(c.CommitTimeout) != 40*time.Minute {
|
} else if time.Duration(c.CommitTimeout) != 40*time.Minute {
|
||||||
t.Fatalf("unexpected commit timeout: %v", c.CommitTimeout)
|
t.Fatalf("unexpected commit timeout: %v", c.CommitTimeout)
|
||||||
|
} else if c.RaftPromotionEnabled {
|
||||||
|
t.Fatalf("unexpected raft promotion enabled: %v", c.RaftPromotionEnabled)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
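A small sketch of how the new knob round-trips through TOML, mirroring the test above (assuming the BurntSushi toml package the test uses):

    var c meta.Config
    if _, err := toml.Decode(`raft-promotion-enabled = false`, &c); err != nil {
    	t.Fatal(err)
    }
    // The `toml:"raft-promotion-enabled"` tag added above binds the key;
    // meta.NewConfig() supplies the DefaultRaftPromotionEnabled (true) default.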
@@ -299,7 +299,7 @@ func (data *Data) SetDefaultRetentionPolicy(database, name string) error {
 	return nil
 }

-// ShardGroup returns a list of all shard groups on a database and policy.
+// ShardGroups returns a list of all shard groups on a database and policy.
 func (data *Data) ShardGroups(database, policy string) ([]ShardGroupInfo, error) {
 	// Find retention policy.
 	rpi, err := data.RetentionPolicy(database, policy)
@@ -751,6 +751,13 @@ func (ni *NodeInfo) unmarshal(pb *internal.NodeInfo) {
 	ni.Host = pb.GetHost()
 }

+// NodeInfos is a slice of NodeInfo used for sorting
+type NodeInfos []NodeInfo
+
+func (n NodeInfos) Len() int           { return len(n) }
+func (n NodeInfos) Swap(i, j int)      { n[i], n[j] = n[j], n[i] }
+func (n NodeInfos) Less(i, j int) bool { return n[i].ID < n[j].ID }
+
 // DatabaseInfo represents information about a database in the system.
 type DatabaseInfo struct {
 	Name string
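NodeInfos gives the meta package a deterministic ordering of cluster nodes by ID, presumably so the raft-promotion logic added in this commit can pick peers consistently. A usage sketch (nodes is an assumed []meta.NodeInfo obtained from the meta store):

    import (
    	"sort"

    	"github.com/influxdb/influxdb/meta"
    )

    // lowestNode returns the node with the smallest ID.
    func lowestNode(nodes []meta.NodeInfo) meta.NodeInfo {
    	sort.Sort(meta.NodeInfos(nodes)) // Less sorts ascending by ID
    	return nodes[0]
    }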
@@ -885,7 +892,7 @@ func (rpi *RetentionPolicyInfo) ShardGroupByTimestamp(timestamp time.Time) *ShardGroupInfo {

 // ExpiredShardGroups returns the Shard Groups which are considered expired, for the given time.
 func (rpi *RetentionPolicyInfo) ExpiredShardGroups(t time.Time) []*ShardGroupInfo {
-	groups := make([]*ShardGroupInfo, 0)
+	var groups = make([]*ShardGroupInfo, 0)
 	for i := range rpi.ShardGroups {
 		if rpi.ShardGroups[i].Deleted() {
 			continue
@@ -899,7 +906,7 @@ func (rpi *RetentionPolicyInfo) ExpiredShardGroups(t time.Time) []*ShardGroupInfo {

 // DeletedShardGroups returns the Shard Groups which are marked as deleted.
 func (rpi *RetentionPolicyInfo) DeletedShardGroups() []*ShardGroupInfo {
-	groups := make([]*ShardGroupInfo, 0)
+	var groups = make([]*ShardGroupInfo, 0)
 	for i := range rpi.ShardGroups {
 		if rpi.ShardGroups[i].Deleted() {
 			groups = append(groups, &rpi.ShardGroups[i])
|
||||||
Shards []ShardInfo
|
Shards []ShardInfo
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ShardGroupInfos is a collection of ShardGroupInfo
|
||||||
type ShardGroupInfos []ShardGroupInfo
|
type ShardGroupInfos []ShardGroupInfo
|
||||||
|
|
||||||
func (a ShardGroupInfos) Len() int { return len(a) }
|
func (a ShardGroupInfos) Len() int { return len(a) }
|
||||||
|
@ -1018,8 +1026,8 @@ func (sgi ShardGroupInfo) clone() ShardGroupInfo {
|
||||||
}
|
}
|
||||||
|
|
||||||
// ShardFor returns the ShardInfo for a Point hash
|
// ShardFor returns the ShardInfo for a Point hash
|
||||||
func (s *ShardGroupInfo) ShardFor(hash uint64) ShardInfo {
|
func (sgi *ShardGroupInfo) ShardFor(hash uint64) ShardInfo {
|
||||||
return s.Shards[hash%uint64(len(s.Shards))]
|
return sgi.Shards[hash%uint64(len(sgi.Shards))]
|
||||||
}
|
}
|
||||||
|
|
||||||
// marshal serializes to a protobuf representation.
|
// marshal serializes to a protobuf representation.
|
||||||
|
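The ShardFor change is a lint-driven receiver rename; the hashing is untouched. A quick sketch of the mapping it implements, with made-up shard values (assuming meta.ShardInfo's uint64 ID field):

    // Points are spread across a group's shards by modular hashing:
    // shard index = hash(point key) mod number of shards.
    g := meta.ShardGroupInfo{Shards: []meta.ShardInfo{{ID: 1}, {ID: 2}, {ID: 3}}}
    s := g.ShardFor(10) // 10 % 3 == 1, so the second shard
    fmt.Println(s.ID)   // 2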
@@ -1128,6 +1136,7 @@ func (si *ShardInfo) unmarshal(pb *internal.ShardInfo) {
 	}
 }

+// SubscriptionInfo hold the subscription information
 type SubscriptionInfo struct {
 	Name string
 	Mode string
@@ -30,7 +30,7 @@ var (
 	// ErrNodeIDRequired is returned when using a zero node id.
 	ErrNodeIDRequired = newError("node id must be greater than 0")

-	// ErrNodeUnableToDropSingleNode is returned if the node being dropped is the last
+	// ErrNodeUnableToDropFinalNode is returned if the node being dropped is the last
 	// node in the cluster
 	ErrNodeUnableToDropFinalNode = newError("unable to drop the final node in a cluster")
 )
@@ -50,14 +50,18 @@ It has these top-level messages:
 	FetchDataResponse
 	JoinRequest
 	JoinResponse
+	PromoteRaftRequest
+	PromoteRaftResponse
 */
 package internal

 import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
 import math "math"

 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
+var _ = fmt.Errorf
 var _ = math.Inf

 type RPCType int32
@@ -66,17 +70,20 @@ const (
 	RPCType_Error     RPCType = 1
 	RPCType_FetchData RPCType = 2
 	RPCType_Join      RPCType = 3
+	RPCType_PromoteRaft RPCType = 4
 )

 var RPCType_name = map[int32]string{
 	1: "Error",
 	2: "FetchData",
 	3: "Join",
+	4: "PromoteRaft",
 }
 var RPCType_value = map[string]int32{
 	"Error":     1,
 	"FetchData": 2,
 	"Join":      3,
+	"PromoteRaft": 4,
 }

 func (x RPCType) Enum() *RPCType {
@@ -190,15 +197,15 @@ func (x *Command_Type) UnmarshalJSON(data []byte) error {
 }

 type Data struct {
-	Term            *uint64         `protobuf:"varint,1,req" json:"Term,omitempty"`
-	Index           *uint64         `protobuf:"varint,2,req" json:"Index,omitempty"`
-	ClusterID       *uint64         `protobuf:"varint,3,req" json:"ClusterID,omitempty"`
-	Nodes           []*NodeInfo     `protobuf:"bytes,4,rep" json:"Nodes,omitempty"`
-	Databases       []*DatabaseInfo `protobuf:"bytes,5,rep" json:"Databases,omitempty"`
-	Users           []*UserInfo     `protobuf:"bytes,6,rep" json:"Users,omitempty"`
-	MaxNodeID       *uint64         `protobuf:"varint,7,req" json:"MaxNodeID,omitempty"`
-	MaxShardGroupID *uint64         `protobuf:"varint,8,req" json:"MaxShardGroupID,omitempty"`
-	MaxShardID      *uint64         `protobuf:"varint,9,req" json:"MaxShardID,omitempty"`
+	Term            *uint64         `protobuf:"varint,1,req,name=Term" json:"Term,omitempty"`
+	Index           *uint64         `protobuf:"varint,2,req,name=Index" json:"Index,omitempty"`
+	ClusterID       *uint64         `protobuf:"varint,3,req,name=ClusterID" json:"ClusterID,omitempty"`
+	Nodes           []*NodeInfo     `protobuf:"bytes,4,rep,name=Nodes" json:"Nodes,omitempty"`
+	Databases       []*DatabaseInfo `protobuf:"bytes,5,rep,name=Databases" json:"Databases,omitempty"`
+	Users           []*UserInfo     `protobuf:"bytes,6,rep,name=Users" json:"Users,omitempty"`
+	MaxNodeID       *uint64         `protobuf:"varint,7,req,name=MaxNodeID" json:"MaxNodeID,omitempty"`
+	MaxShardGroupID *uint64         `protobuf:"varint,8,req,name=MaxShardGroupID" json:"MaxShardGroupID,omitempty"`
+	MaxShardID      *uint64         `protobuf:"varint,9,req,name=MaxShardID" json:"MaxShardID,omitempty"`
 	XXX_unrecognized []byte         `json:"-"`
 }

@@ -270,8 +277,8 @@ func (m *Data) GetMaxShardID() uint64 {
 }

 type NodeInfo struct {
-	ID   *uint64 `protobuf:"varint,1,req" json:"ID,omitempty"`
-	Host *string `protobuf:"bytes,2,req" json:"Host,omitempty"`
+	ID   *uint64 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"`
+	Host *string `protobuf:"bytes,2,req,name=Host" json:"Host,omitempty"`
 	XXX_unrecognized []byte `json:"-"`
 }

@@ -294,10 +301,10 @@ func (m *NodeInfo) GetHost() string {
 }

 type DatabaseInfo struct {
-	Name                   *string                `protobuf:"bytes,1,req" json:"Name,omitempty"`
-	DefaultRetentionPolicy *string                `protobuf:"bytes,2,req" json:"DefaultRetentionPolicy,omitempty"`
-	RetentionPolicies      []*RetentionPolicyInfo `protobuf:"bytes,3,rep" json:"RetentionPolicies,omitempty"`
-	ContinuousQueries      []*ContinuousQueryInfo `protobuf:"bytes,4,rep" json:"ContinuousQueries,omitempty"`
+	Name                   *string                `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"`
+	DefaultRetentionPolicy *string                `protobuf:"bytes,2,req,name=DefaultRetentionPolicy" json:"DefaultRetentionPolicy,omitempty"`
+	RetentionPolicies      []*RetentionPolicyInfo `protobuf:"bytes,3,rep,name=RetentionPolicies" json:"RetentionPolicies,omitempty"`
+	ContinuousQueries      []*ContinuousQueryInfo `protobuf:"bytes,4,rep,name=ContinuousQueries" json:"ContinuousQueries,omitempty"`
 	XXX_unrecognized []byte `json:"-"`
 }

@@ -334,12 +341,12 @@ func (m *DatabaseInfo) GetContinuousQueries() []*ContinuousQueryInfo {
 }

 type RetentionPolicyInfo struct {
-	Name               *string             `protobuf:"bytes,1,req" json:"Name,omitempty"`
-	Duration           *int64              `protobuf:"varint,2,req" json:"Duration,omitempty"`
-	ShardGroupDuration *int64              `protobuf:"varint,3,req" json:"ShardGroupDuration,omitempty"`
-	ReplicaN           *uint32             `protobuf:"varint,4,req" json:"ReplicaN,omitempty"`
-	ShardGroups        []*ShardGroupInfo   `protobuf:"bytes,5,rep" json:"ShardGroups,omitempty"`
-	Subscriptions      []*SubscriptionInfo `protobuf:"bytes,6,rep" json:"Subscriptions,omitempty"`
+	Name               *string             `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"`
+	Duration           *int64              `protobuf:"varint,2,req,name=Duration" json:"Duration,omitempty"`
+	ShardGroupDuration *int64              `protobuf:"varint,3,req,name=ShardGroupDuration" json:"ShardGroupDuration,omitempty"`
+	ReplicaN           *uint32             `protobuf:"varint,4,req,name=ReplicaN" json:"ReplicaN,omitempty"`
+	ShardGroups        []*ShardGroupInfo   `protobuf:"bytes,5,rep,name=ShardGroups" json:"ShardGroups,omitempty"`
+	Subscriptions      []*SubscriptionInfo `protobuf:"bytes,6,rep,name=Subscriptions" json:"Subscriptions,omitempty"`
 	XXX_unrecognized []byte `json:"-"`
 }

@@ -390,11 +397,11 @@ func (m *RetentionPolicyInfo) GetSubscriptions() []*SubscriptionInfo {
 }

 type ShardGroupInfo struct {
-	ID        *uint64      `protobuf:"varint,1,req" json:"ID,omitempty"`
-	StartTime *int64       `protobuf:"varint,2,req" json:"StartTime,omitempty"`
-	EndTime   *int64       `protobuf:"varint,3,req" json:"EndTime,omitempty"`
-	DeletedAt *int64       `protobuf:"varint,4,req" json:"DeletedAt,omitempty"`
-	Shards    []*ShardInfo `protobuf:"bytes,5,rep" json:"Shards,omitempty"`
+	ID        *uint64      `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"`
+	StartTime *int64       `protobuf:"varint,2,req,name=StartTime" json:"StartTime,omitempty"`
+	EndTime   *int64       `protobuf:"varint,3,req,name=EndTime" json:"EndTime,omitempty"`
+	DeletedAt *int64       `protobuf:"varint,4,req,name=DeletedAt" json:"DeletedAt,omitempty"`
+	Shards    []*ShardInfo `protobuf:"bytes,5,rep,name=Shards" json:"Shards,omitempty"`
 	XXX_unrecognized []byte `json:"-"`
 }

@@ -438,9 +445,9 @@ func (m *ShardGroupInfo) GetShards() []*ShardInfo {
 }

 type ShardInfo struct {
-	ID       *uint64       `protobuf:"varint,1,req" json:"ID,omitempty"`
-	OwnerIDs []uint64      `protobuf:"varint,2,rep" json:"OwnerIDs,omitempty"`
-	Owners   []*ShardOwner `protobuf:"bytes,3,rep" json:"Owners,omitempty"`
+	ID       *uint64       `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"`
+	OwnerIDs []uint64      `protobuf:"varint,2,rep,name=OwnerIDs" json:"OwnerIDs,omitempty"`
+	Owners   []*ShardOwner `protobuf:"bytes,3,rep,name=Owners" json:"Owners,omitempty"`
 	XXX_unrecognized []byte `json:"-"`
 }

@@ -470,9 +477,9 @@ func (m *ShardInfo) GetOwners() []*ShardOwner {
 }

 type SubscriptionInfo struct {
-	Name         *string  `protobuf:"bytes,1,req" json:"Name,omitempty"`
-	Mode         *string  `protobuf:"bytes,2,req" json:"Mode,omitempty"`
-	Destinations []string `protobuf:"bytes,3,rep" json:"Destinations,omitempty"`
+	Name         *string  `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"`
+	Mode         *string  `protobuf:"bytes,2,req,name=Mode" json:"Mode,omitempty"`
+	Destinations []string `protobuf:"bytes,3,rep,name=Destinations" json:"Destinations,omitempty"`
 	XXX_unrecognized []byte `json:"-"`
 }

@@ -502,7 +509,7 @@ func (m *SubscriptionInfo) GetDestinations() []string {
 }

 type ShardOwner struct {
-	NodeID *uint64 `protobuf:"varint,1,req" json:"NodeID,omitempty"`
+	NodeID *uint64 `protobuf:"varint,1,req,name=NodeID" json:"NodeID,omitempty"`
 	XXX_unrecognized []byte `json:"-"`
 }

@@ -518,8 +525,8 @@ func (m *ShardOwner) GetNodeID() uint64 {
 }

 type ContinuousQueryInfo struct {
-	Name  *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
-	Query *string `protobuf:"bytes,2,req" json:"Query,omitempty"`
+	Name  *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"`
+	Query *string `protobuf:"bytes,2,req,name=Query" json:"Query,omitempty"`
 	XXX_unrecognized []byte `json:"-"`
 }

@@ -542,10 +549,10 @@ func (m *ContinuousQueryInfo) GetQuery() string {
 }

 type UserInfo struct {
-	Name       *string          `protobuf:"bytes,1,req" json:"Name,omitempty"`
-	Hash       *string          `protobuf:"bytes,2,req" json:"Hash,omitempty"`
-	Admin      *bool            `protobuf:"varint,3,req" json:"Admin,omitempty"`
-	Privileges []*UserPrivilege `protobuf:"bytes,4,rep" json:"Privileges,omitempty"`
+	Name       *string          `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"`
+	Hash       *string          `protobuf:"bytes,2,req,name=Hash" json:"Hash,omitempty"`
+	Admin      *bool            `protobuf:"varint,3,req,name=Admin" json:"Admin,omitempty"`
+	Privileges []*UserPrivilege `protobuf:"bytes,4,rep,name=Privileges" json:"Privileges,omitempty"`
 	XXX_unrecognized []byte `json:"-"`
 }

@@ -582,8 +589,8 @@ func (m *UserInfo) GetPrivileges() []*UserPrivilege {
 }

 type UserPrivilege struct {
-	Database  *string `protobuf:"bytes,1,req" json:"Database,omitempty"`
-	Privilege *int32  `protobuf:"varint,2,req" json:"Privilege,omitempty"`
+	Database  *string `protobuf:"bytes,1,req,name=Database" json:"Database,omitempty"`
+	Privilege *int32  `protobuf:"varint,2,req,name=Privilege" json:"Privilege,omitempty"`
 	XXX_unrecognized []byte `json:"-"`
 }

@@ -637,8 +644,8 @@ func (m *Command) GetType() Command_Type {
 }

 type CreateNodeCommand struct {
-	Host *string `protobuf:"bytes,1,req" json:"Host,omitempty"`
-	Rand *uint64 `protobuf:"varint,2,req" json:"Rand,omitempty"`
+	Host *string `protobuf:"bytes,1,req,name=Host" json:"Host,omitempty"`
+	Rand *uint64 `protobuf:"varint,2,req,name=Rand" json:"Rand,omitempty"`
 	XXX_unrecognized []byte `json:"-"`
 }

@@ -669,8 +676,8 @@ var E_CreateNodeCommand_Command = &proto.ExtensionDesc{
 }

 type DeleteNodeCommand struct {
-	ID    *uint64 `protobuf:"varint,1,req" json:"ID,omitempty"`
-	Force *bool   `protobuf:"varint,2,req" json:"Force,omitempty"`
+	ID    *uint64 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"`
+	Force *bool   `protobuf:"varint,2,req,name=Force" json:"Force,omitempty"`
 	XXX_unrecognized []byte `json:"-"`
 }

@@ -701,7 +708,7 @@ var E_DeleteNodeCommand_Command = &proto.ExtensionDesc{
 }

 type CreateDatabaseCommand struct {
-	Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
+	Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"`
 	XXX_unrecognized []byte `json:"-"`
 }

@@ -725,7 +732,7 @@ var E_CreateDatabaseCommand_Command = &proto.ExtensionDesc{
 }

 type DropDatabaseCommand struct {
-	Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
+	Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"`
 	XXX_unrecognized []byte `json:"-"`
 }

@@ -749,8 +756,8 @@ var E_DropDatabaseCommand_Command = &proto.ExtensionDesc{
 }

 type CreateRetentionPolicyCommand struct {
-	Database        *string              `protobuf:"bytes,1,req" json:"Database,omitempty"`
-	RetentionPolicy *RetentionPolicyInfo `protobuf:"bytes,2,req" json:"RetentionPolicy,omitempty"`
+	Database        *string              `protobuf:"bytes,1,req,name=Database" json:"Database,omitempty"`
+	RetentionPolicy *RetentionPolicyInfo `protobuf:"bytes,2,req,name=RetentionPolicy" json:"RetentionPolicy,omitempty"`
 	XXX_unrecognized []byte `json:"-"`
 }

@@ -781,8 +788,8 @@ var E_CreateRetentionPolicyCommand_Command = &proto.ExtensionDesc{
 }

 type DropRetentionPolicyCommand struct {
-	Database *string `protobuf:"bytes,1,req" json:"Database,omitempty"`
-	Name     *string `protobuf:"bytes,2,req" json:"Name,omitempty"`
+	Database *string `protobuf:"bytes,1,req,name=Database" json:"Database,omitempty"`
+	Name     *string `protobuf:"bytes,2,req,name=Name" json:"Name,omitempty"`
 	XXX_unrecognized []byte `json:"-"`
 }

@@ -813,8 +820,8 @@ var E_DropRetentionPolicyCommand_Command = &proto.ExtensionDesc{
 }

 type SetDefaultRetentionPolicyCommand struct {
-	Database *string `protobuf:"bytes,1,req" json:"Database,omitempty"`
-	Name     *string `protobuf:"bytes,2,req" json:"Name,omitempty"`
+	Database *string `protobuf:"bytes,1,req,name=Database" json:"Database,omitempty"`
+	Name     *string `protobuf:"bytes,2,req,name=Name" json:"Name,omitempty"`
 	XXX_unrecognized []byte `json:"-"`
 }

@@ -845,11 +852,11 @@ var E_SetDefaultRetentionPolicyCommand_Command = &proto.ExtensionDesc{
 }

 type UpdateRetentionPolicyCommand struct {
-	Database *string `protobuf:"bytes,1,req" json:"Database,omitempty"`
-	Name     *string `protobuf:"bytes,2,req" json:"Name,omitempty"`
-	NewName  *string `protobuf:"bytes,3,opt" json:"NewName,omitempty"`
-	Duration *int64  `protobuf:"varint,4,opt" json:"Duration,omitempty"`
+	Database *string `protobuf:"bytes,1,req,name=Database" json:"Database,omitempty"`
+	Name     *string `protobuf:"bytes,2,req,name=Name" json:"Name,omitempty"`
+	NewName  *string `protobuf:"bytes,3,opt,name=NewName" json:"NewName,omitempty"`
+	Duration *int64  `protobuf:"varint,4,opt,name=Duration" json:"Duration,omitempty"`
|
||||||
ReplicaN *uint32 `protobuf:"varint,5,opt" json:"ReplicaN,omitempty"`
|
ReplicaN *uint32 `protobuf:"varint,5,opt,name=ReplicaN" json:"ReplicaN,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -901,9 +908,9 @@ var E_UpdateRetentionPolicyCommand_Command = &proto.ExtensionDesc{
|
||||||
}
|
}
|
||||||
|
|
||||||
type CreateShardGroupCommand struct {
|
type CreateShardGroupCommand struct {
|
||||||
Database *string `protobuf:"bytes,1,req" json:"Database,omitempty"`
|
Database *string `protobuf:"bytes,1,req,name=Database" json:"Database,omitempty"`
|
||||||
Policy *string `protobuf:"bytes,2,req" json:"Policy,omitempty"`
|
Policy *string `protobuf:"bytes,2,req,name=Policy" json:"Policy,omitempty"`
|
||||||
Timestamp *int64 `protobuf:"varint,3,req" json:"Timestamp,omitempty"`
|
Timestamp *int64 `protobuf:"varint,3,req,name=Timestamp" json:"Timestamp,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -941,9 +948,9 @@ var E_CreateShardGroupCommand_Command = &proto.ExtensionDesc{
|
||||||
}
|
}
|
||||||
|
|
||||||
type DeleteShardGroupCommand struct {
|
type DeleteShardGroupCommand struct {
|
||||||
Database *string `protobuf:"bytes,1,req" json:"Database,omitempty"`
|
Database *string `protobuf:"bytes,1,req,name=Database" json:"Database,omitempty"`
|
||||||
Policy *string `protobuf:"bytes,2,req" json:"Policy,omitempty"`
|
Policy *string `protobuf:"bytes,2,req,name=Policy" json:"Policy,omitempty"`
|
||||||
ShardGroupID *uint64 `protobuf:"varint,3,req" json:"ShardGroupID,omitempty"`
|
ShardGroupID *uint64 `protobuf:"varint,3,req,name=ShardGroupID" json:"ShardGroupID,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -981,9 +988,9 @@ var E_DeleteShardGroupCommand_Command = &proto.ExtensionDesc{
|
||||||
}
|
}
|
||||||
|
|
||||||
type CreateContinuousQueryCommand struct {
|
type CreateContinuousQueryCommand struct {
|
||||||
Database *string `protobuf:"bytes,1,req" json:"Database,omitempty"`
|
Database *string `protobuf:"bytes,1,req,name=Database" json:"Database,omitempty"`
|
||||||
Name *string `protobuf:"bytes,2,req" json:"Name,omitempty"`
|
Name *string `protobuf:"bytes,2,req,name=Name" json:"Name,omitempty"`
|
||||||
Query *string `protobuf:"bytes,3,req" json:"Query,omitempty"`
|
Query *string `protobuf:"bytes,3,req,name=Query" json:"Query,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1021,8 +1028,8 @@ var E_CreateContinuousQueryCommand_Command = &proto.ExtensionDesc{
|
||||||
}
|
}
|
||||||
|
|
||||||
type DropContinuousQueryCommand struct {
|
type DropContinuousQueryCommand struct {
|
||||||
Database *string `protobuf:"bytes,1,req" json:"Database,omitempty"`
|
Database *string `protobuf:"bytes,1,req,name=Database" json:"Database,omitempty"`
|
||||||
Name *string `protobuf:"bytes,2,req" json:"Name,omitempty"`
|
Name *string `protobuf:"bytes,2,req,name=Name" json:"Name,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1053,9 +1060,9 @@ var E_DropContinuousQueryCommand_Command = &proto.ExtensionDesc{
|
||||||
}
|
}
|
||||||
|
|
||||||
type CreateUserCommand struct {
|
type CreateUserCommand struct {
|
||||||
Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
|
Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"`
|
||||||
Hash *string `protobuf:"bytes,2,req" json:"Hash,omitempty"`
|
Hash *string `protobuf:"bytes,2,req,name=Hash" json:"Hash,omitempty"`
|
||||||
Admin *bool `protobuf:"varint,3,req" json:"Admin,omitempty"`
|
Admin *bool `protobuf:"varint,3,req,name=Admin" json:"Admin,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1093,7 +1100,7 @@ var E_CreateUserCommand_Command = &proto.ExtensionDesc{
|
||||||
}
|
}
|
||||||
|
|
||||||
type DropUserCommand struct {
|
type DropUserCommand struct {
|
||||||
Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
|
Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1117,8 +1124,8 @@ var E_DropUserCommand_Command = &proto.ExtensionDesc{
|
||||||
}
|
}
|
||||||
|
|
||||||
type UpdateUserCommand struct {
|
type UpdateUserCommand struct {
|
||||||
Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
|
Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"`
|
||||||
Hash *string `protobuf:"bytes,2,req" json:"Hash,omitempty"`
|
Hash *string `protobuf:"bytes,2,req,name=Hash" json:"Hash,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1149,9 +1156,9 @@ var E_UpdateUserCommand_Command = &proto.ExtensionDesc{
|
||||||
}
|
}
|
||||||
|
|
||||||
type SetPrivilegeCommand struct {
|
type SetPrivilegeCommand struct {
|
||||||
Username *string `protobuf:"bytes,1,req" json:"Username,omitempty"`
|
Username *string `protobuf:"bytes,1,req,name=Username" json:"Username,omitempty"`
|
||||||
Database *string `protobuf:"bytes,2,req" json:"Database,omitempty"`
|
Database *string `protobuf:"bytes,2,req,name=Database" json:"Database,omitempty"`
|
||||||
Privilege *int32 `protobuf:"varint,3,req" json:"Privilege,omitempty"`
|
Privilege *int32 `protobuf:"varint,3,req,name=Privilege" json:"Privilege,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1189,7 +1196,7 @@ var E_SetPrivilegeCommand_Command = &proto.ExtensionDesc{
|
||||||
}
|
}
|
||||||
|
|
||||||
type SetDataCommand struct {
|
type SetDataCommand struct {
|
||||||
Data *Data `protobuf:"bytes,1,req" json:"Data,omitempty"`
|
Data *Data `protobuf:"bytes,1,req,name=Data" json:"Data,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1213,8 +1220,8 @@ var E_SetDataCommand_Command = &proto.ExtensionDesc{
|
||||||
}
|
}
|
||||||
|
|
||||||
type SetAdminPrivilegeCommand struct {
|
type SetAdminPrivilegeCommand struct {
|
||||||
Username *string `protobuf:"bytes,1,req" json:"Username,omitempty"`
|
Username *string `protobuf:"bytes,1,req,name=Username" json:"Username,omitempty"`
|
||||||
Admin *bool `protobuf:"varint,2,req" json:"Admin,omitempty"`
|
Admin *bool `protobuf:"varint,2,req,name=Admin" json:"Admin,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1245,8 +1252,8 @@ var E_SetAdminPrivilegeCommand_Command = &proto.ExtensionDesc{
|
||||||
}
|
}
|
||||||
|
|
||||||
type UpdateNodeCommand struct {
|
type UpdateNodeCommand struct {
|
||||||
ID *uint64 `protobuf:"varint,1,req" json:"ID,omitempty"`
|
ID *uint64 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"`
|
||||||
Host *string `protobuf:"bytes,2,req" json:"Host,omitempty"`
|
Host *string `protobuf:"bytes,2,req,name=Host" json:"Host,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1277,11 +1284,11 @@ var E_UpdateNodeCommand_Command = &proto.ExtensionDesc{
|
||||||
}
|
}
|
||||||
|
|
||||||
type CreateSubscriptionCommand struct {
|
type CreateSubscriptionCommand struct {
|
||||||
Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
|
Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"`
|
||||||
Database *string `protobuf:"bytes,2,req" json:"Database,omitempty"`
|
Database *string `protobuf:"bytes,2,req,name=Database" json:"Database,omitempty"`
|
||||||
RetentionPolicy *string `protobuf:"bytes,3,req" json:"RetentionPolicy,omitempty"`
|
RetentionPolicy *string `protobuf:"bytes,3,req,name=RetentionPolicy" json:"RetentionPolicy,omitempty"`
|
||||||
Mode *string `protobuf:"bytes,4,req" json:"Mode,omitempty"`
|
Mode *string `protobuf:"bytes,4,req,name=Mode" json:"Mode,omitempty"`
|
||||||
Destinations []string `protobuf:"bytes,5,rep" json:"Destinations,omitempty"`
|
Destinations []string `protobuf:"bytes,5,rep,name=Destinations" json:"Destinations,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1333,9 +1340,9 @@ var E_CreateSubscriptionCommand_Command = &proto.ExtensionDesc{
|
||||||
}
|
}
|
||||||
|
|
||||||
type DropSubscriptionCommand struct {
|
type DropSubscriptionCommand struct {
|
||||||
Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
|
Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"`
|
||||||
Database *string `protobuf:"bytes,2,req" json:"Database,omitempty"`
|
Database *string `protobuf:"bytes,2,req,name=Database" json:"Database,omitempty"`
|
||||||
RetentionPolicy *string `protobuf:"bytes,3,req" json:"RetentionPolicy,omitempty"`
|
RetentionPolicy *string `protobuf:"bytes,3,req,name=RetentionPolicy" json:"RetentionPolicy,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1405,9 +1412,9 @@ var E_RemovePeerCommand_Command = &proto.ExtensionDesc{
|
||||||
}
|
}
|
||||||
|
|
||||||
type Response struct {
|
type Response struct {
|
||||||
OK *bool `protobuf:"varint,1,req" json:"OK,omitempty"`
|
OK *bool `protobuf:"varint,1,req,name=OK" json:"OK,omitempty"`
|
||||||
Error *string `protobuf:"bytes,2,opt" json:"Error,omitempty"`
|
Error *string `protobuf:"bytes,2,opt,name=Error" json:"Error,omitempty"`
|
||||||
Index *uint64 `protobuf:"varint,3,opt" json:"Index,omitempty"`
|
Index *uint64 `protobuf:"varint,3,opt,name=Index" json:"Index,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1437,8 +1444,8 @@ func (m *Response) GetIndex() uint64 {
|
||||||
}
|
}
|
||||||
|
|
||||||
type ResponseHeader struct {
|
type ResponseHeader struct {
|
||||||
OK *bool `protobuf:"varint,1,req" json:"OK,omitempty"`
|
OK *bool `protobuf:"varint,1,req,name=OK" json:"OK,omitempty"`
|
||||||
Error *string `protobuf:"bytes,2,opt" json:"Error,omitempty"`
|
Error *string `protobuf:"bytes,2,opt,name=Error" json:"Error,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1461,7 +1468,7 @@ func (m *ResponseHeader) GetError() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
type ErrorResponse struct {
|
type ErrorResponse struct {
|
||||||
Header *ResponseHeader `protobuf:"bytes,1,req" json:"Header,omitempty"`
|
Header *ResponseHeader `protobuf:"bytes,1,req,name=Header" json:"Header,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1477,9 +1484,9 @@ func (m *ErrorResponse) GetHeader() *ResponseHeader {
|
||||||
}
|
}
|
||||||
|
|
||||||
type FetchDataRequest struct {
|
type FetchDataRequest struct {
|
||||||
Index *uint64 `protobuf:"varint,1,req" json:"Index,omitempty"`
|
Index *uint64 `protobuf:"varint,1,req,name=Index" json:"Index,omitempty"`
|
||||||
Term *uint64 `protobuf:"varint,2,req" json:"Term,omitempty"`
|
Term *uint64 `protobuf:"varint,2,req,name=Term" json:"Term,omitempty"`
|
||||||
Blocking *bool `protobuf:"varint,3,opt,def=0" json:"Blocking,omitempty"`
|
Blocking *bool `protobuf:"varint,3,opt,name=Blocking,def=0" json:"Blocking,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1511,10 +1518,10 @@ func (m *FetchDataRequest) GetBlocking() bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
type FetchDataResponse struct {
|
type FetchDataResponse struct {
|
||||||
Header *ResponseHeader `protobuf:"bytes,1,req" json:"Header,omitempty"`
|
Header *ResponseHeader `protobuf:"bytes,1,req,name=Header" json:"Header,omitempty"`
|
||||||
Index *uint64 `protobuf:"varint,2,req" json:"Index,omitempty"`
|
Index *uint64 `protobuf:"varint,2,req,name=Index" json:"Index,omitempty"`
|
||||||
Term *uint64 `protobuf:"varint,3,req" json:"Term,omitempty"`
|
Term *uint64 `protobuf:"varint,3,req,name=Term" json:"Term,omitempty"`
|
||||||
Data []byte `protobuf:"bytes,4,opt" json:"Data,omitempty"`
|
Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1551,7 +1558,7 @@ func (m *FetchDataResponse) GetData() []byte {
|
||||||
}
|
}
|
||||||
|
|
||||||
type JoinRequest struct {
|
type JoinRequest struct {
|
||||||
Addr *string `protobuf:"bytes,1,req" json:"Addr,omitempty"`
|
Addr *string `protobuf:"bytes,1,req,name=Addr" json:"Addr,omitempty"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1567,14 +1574,10 @@ func (m *JoinRequest) GetAddr() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
type JoinResponse struct {
|
type JoinResponse struct {
|
||||||
Header *ResponseHeader `protobuf:"bytes,1,req" json:"Header,omitempty"`
|
Header *ResponseHeader `protobuf:"bytes,1,req,name=Header" json:"Header,omitempty"`
|
||||||
// Indicates that this node should take part in the raft cluster.
|
EnableRaft *bool `protobuf:"varint,2,opt,name=EnableRaft" json:"EnableRaft,omitempty"`
|
||||||
EnableRaft *bool `protobuf:"varint,2,opt" json:"EnableRaft,omitempty"`
|
RaftNodes []string `protobuf:"bytes,3,rep,name=RaftNodes" json:"RaftNodes,omitempty"`
|
||||||
// The addresses of raft peers to use if joining as a raft member. If not joining
|
NodeID *uint64 `protobuf:"varint,4,opt,name=NodeID" json:"NodeID,omitempty"`
|
||||||
// as a raft member, these are the nodes running raft.
|
|
||||||
RaftNodes []string `protobuf:"bytes,3,rep" json:"RaftNodes,omitempty"`
|
|
||||||
// The node ID assigned to the requesting node.
|
|
||||||
NodeID *uint64 `protobuf:"varint,4,opt" json:"NodeID,omitempty"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1610,6 +1613,54 @@ func (m *JoinResponse) GetNodeID() uint64 {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type PromoteRaftRequest struct {
|
||||||
|
Addr *string `protobuf:"bytes,1,req,name=Addr" json:"Addr,omitempty"`
|
||||||
|
RaftNodes []string `protobuf:"bytes,2,rep,name=RaftNodes" json:"RaftNodes,omitempty"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *PromoteRaftRequest) Reset() { *m = PromoteRaftRequest{} }
|
||||||
|
func (m *PromoteRaftRequest) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*PromoteRaftRequest) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (m *PromoteRaftRequest) GetAddr() string {
|
||||||
|
if m != nil && m.Addr != nil {
|
||||||
|
return *m.Addr
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *PromoteRaftRequest) GetRaftNodes() []string {
|
||||||
|
if m != nil {
|
||||||
|
return m.RaftNodes
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type PromoteRaftResponse struct {
|
||||||
|
Header *ResponseHeader `protobuf:"bytes,1,req,name=Header" json:"Header,omitempty"`
|
||||||
|
Success *bool `protobuf:"varint,2,opt,name=Success" json:"Success,omitempty"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *PromoteRaftResponse) Reset() { *m = PromoteRaftResponse{} }
|
||||||
|
func (m *PromoteRaftResponse) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*PromoteRaftResponse) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (m *PromoteRaftResponse) GetHeader() *ResponseHeader {
|
||||||
|
if m != nil {
|
||||||
|
return m.Header
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *PromoteRaftResponse) GetSuccess() bool {
|
||||||
|
if m != nil && m.Success != nil {
|
||||||
|
return *m.Success
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
proto.RegisterEnum("internal.RPCType", RPCType_name, RPCType_value)
|
proto.RegisterEnum("internal.RPCType", RPCType_name, RPCType_value)
|
||||||
proto.RegisterEnum("internal.Command_Type", Command_Type_name, Command_Type_value)
|
proto.RegisterEnum("internal.Command_Type", Command_Type_name, Command_Type_value)
|
||||||
|
|
|
@@ -322,6 +322,7 @@ enum RPCType {
 	Error = 1;
 	FetchData = 2;
 	Join = 3;
+	PromoteRaft = 4;
 }
 
 message ResponseHeader {
@@ -363,3 +364,14 @@ message JoinResponse {
 	// The node ID assigned to the requesting node.
 	optional uint64 NodeID = 4;
 }
+
+message PromoteRaftRequest {
+	required string Addr = 1;
+	repeated string RaftNodes = 2;
+}
+
+message PromoteRaftResponse {
+	required ResponseHeader Header = 1;
+
+	optional bool Success = 2;
+}
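For orientation, here is a minimal sketch (not part of this commit) of how the new promotion messages round-trip through the generated API. The import paths are assumptions based on the vendored layout; adjust them to the actual tree.

```go
package main

import (
	"fmt"

	// Assumed import paths; the real vendored tree may differ.
	"github.com/gogo/protobuf/proto"
	"github.com/influxdb/influxdb/meta/internal"
)

func main() {
	// Build the request the way rpc.enableRaft does.
	req := &internal.PromoteRaftRequest{
		Addr:      proto.String("host3:8088"),
		RaftNodes: []string{"host0:8088", "host1:8088", "host2:8088"},
	}

	// Encode, as rpc.call does before framing the bytes with pack().
	b, err := proto.Marshal(req)
	if err != nil {
		panic(err)
	}

	// Decode on the receiving side, as executeMessage does.
	var out internal.PromoteRaftRequest
	if err := proto.Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetAddr(), out.GetRaftNodes())
}
```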
@@ -28,9 +28,11 @@ type rpc struct {
 
 	store interface {
 		cachedData() *Data
+		enableLocalRaft() error
 		IsLeader() bool
 		Leader() string
 		Peers() ([]string, error)
+		SetPeers(addrs []string) error
 		AddPeer(host string) error
 		CreateNode(host string) (*NodeInfo, error)
 		NodeByHost(host string) (*NodeInfo, error)
@@ -38,18 +40,20 @@ type rpc struct {
 	}
 }
 
+// JoinResult defines the join result structure.
 type JoinResult struct {
 	RaftEnabled bool
 	RaftNodes   []string
 	NodeID      uint64
 }
 
+// Reply defines the interface for Reply objects.
 type Reply interface {
 	GetHeader() *internal.ResponseHeader
 }
 
 // proxyLeader proxies the connection to the current raft leader
-func (r *rpc) proxyLeader(conn *net.TCPConn) {
+func (r *rpc) proxyLeader(conn *net.TCPConn, buf []byte) {
 	if r.store.Leader() == "" {
 		r.sendError(conn, "no leader detected during proxyLeader")
 		return
@@ -63,6 +67,8 @@ func (r *rpc) proxyLeader(conn *net.TCPConn) {
 	defer leaderConn.Close()
 
 	leaderConn.Write([]byte{MuxRPCHeader})
+	// re-write the original message to the leader
+	leaderConn.Write(buf)
 	if err := proxy(leaderConn.(*net.TCPConn), conn); err != nil {
 		r.sendError(conn, fmt.Sprintf("leader proxy error: %v", err))
 	}
@@ -76,13 +82,39 @@ func (r *rpc) handleRPCConn(conn net.Conn) {
 	// in the cluster.
 	r.traceCluster("rpc connection from: %v", conn.RemoteAddr())
 
-	if !r.store.IsLeader() {
-		r.proxyLeader(conn.(*net.TCPConn))
+	// Read and execute request.
+	typ, buf, err := r.readMessage(conn)
+	// Handle unexpected RPC errors
+	if err != nil {
+		r.sendError(conn, err.Error())
 		return
 	}
 
-	// Read and execute request.
-	typ, resp, err := func() (internal.RPCType, proto.Message, error) {
+	if !r.store.IsLeader() && typ != internal.RPCType_PromoteRaft {
+		r.proxyLeader(conn.(*net.TCPConn), pack(typ, buf))
+		return
+	}
+
+	typ, resp, err := r.executeMessage(conn, typ, buf)
+
+	// Handle unexpected RPC errors
+	if err != nil {
+		r.sendError(conn, err.Error())
+		return
+	}
+
+	// Set the status header and error message
+	if reply, ok := resp.(Reply); ok {
+		reply.GetHeader().OK = proto.Bool(err == nil)
+		if err != nil {
+			reply.GetHeader().Error = proto.String(err.Error())
+		}
+	}
+
+	r.sendResponse(conn, typ, resp)
+}
+
+func (r *rpc) readMessage(conn net.Conn) (internal.RPCType, []byte, error) {
 	// Read request size.
 	var sz uint64
 	if err := binary.Read(conn, binary.BigEndian, &sz); err != nil {
@@ -90,11 +122,11 @@ func (r *rpc) handleRPCConn(conn net.Conn) {
 	}
 
 	if sz == 0 {
-		return 0, nil, fmt.Errorf("invalid message size: %d", sz)
+		return internal.RPCType_Error, nil, fmt.Errorf("invalid message size: %d", sz)
 	}
 
 	if sz >= MaxMessageSize {
-		return 0, nil, fmt.Errorf("max message size of %d exceeded: %d", MaxMessageSize, sz)
+		return internal.RPCType_Error, nil, fmt.Errorf("max message size of %d exceeded: %d", MaxMessageSize, sz)
 	}
 
 	// Read request.
@@ -108,6 +140,10 @@ func (r *rpc) handleRPCConn(conn net.Conn) {
 	buf = buf[8:]
 
 	r.traceCluster("recv %v request on: %v", rpcType, conn.RemoteAddr())
+	return rpcType, buf, nil
+}
+
+func (r *rpc) executeMessage(conn net.Conn, rpcType internal.RPCType, buf []byte) (internal.RPCType, proto.Message, error) {
 	switch rpcType {
 	case internal.RPCType_FetchData:
 		var req internal.FetchDataRequest
@@ -123,30 +159,16 @@ func (r *rpc) handleRPCConn(conn net.Conn) {
 		}
 		resp, err := r.handleJoinRequest(&req)
 		return rpcType, resp, err
+	case internal.RPCType_PromoteRaft:
+		var req internal.PromoteRaftRequest
+		if err := proto.Unmarshal(buf, &req); err != nil {
+			return internal.RPCType_Error, nil, fmt.Errorf("promote to raft request unmarshal: %v", err)
+		}
+		resp, err := r.handlePromoteRaftRequest(&req)
+		return rpcType, resp, err
 	default:
 		return internal.RPCType_Error, nil, fmt.Errorf("unknown rpc type:%v", rpcType)
 	}
-	}()
-
-	// Handle unexpected RPC errors
-	if err != nil {
-		resp = &internal.ErrorResponse{
-			Header: &internal.ResponseHeader{
-				OK: proto.Bool(false),
-			},
-		}
-		typ = internal.RPCType_Error
-	}
-
-	// Set the status header and error message
-	if reply, ok := resp.(Reply); ok {
-		reply.GetHeader().OK = proto.Bool(err == nil)
-		if err != nil {
-			reply.GetHeader().Error = proto.String(err.Error())
-		}
-	}
-
-	r.sendResponse(conn, typ, resp)
 }
 
 func (r *rpc) sendResponse(conn net.Conn, typ internal.RPCType, resp proto.Message) {
@@ -158,7 +180,7 @@ func (r *rpc) sendResponse(conn net.Conn, typ internal.RPCType, resp proto.Message) {
 	}
 
 	// Encode response back to connection.
-	if _, err := conn.Write(r.pack(typ, buf)); err != nil {
+	if _, err := conn.Write(pack(typ, buf)); err != nil {
 		r.logger.Printf("unable to write rpc response: %s", err)
 	}
 }
@@ -269,12 +291,39 @@ func (r *rpc) handleJoinRequest(req *internal.JoinRequest) (*internal.JoinRespon
 		RaftNodes: peers,
 		NodeID:    proto.Uint64(nodeID),
 	}, err
+}
+
+func (r *rpc) handlePromoteRaftRequest(req *internal.PromoteRaftRequest) (*internal.PromoteRaftResponse, error) {
+	r.traceCluster("promote raft request from: %v", *req.Addr)
+
+	// Need to set the local store peers to match what we are about to join
+	if err := r.store.SetPeers(req.RaftNodes); err != nil {
+		return nil, err
+	}
+
+	if err := r.store.enableLocalRaft(); err != nil {
+		return nil, err
+	}
+
+	if !contains(req.RaftNodes, *req.Addr) {
+		req.RaftNodes = append(req.RaftNodes, *req.Addr)
+	}
+
+	if err := r.store.SetPeers(req.RaftNodes); err != nil {
+		return nil, err
+	}
+
+	return &internal.PromoteRaftResponse{
+		Header: &internal.ResponseHeader{
+			OK: proto.Bool(true),
+		},
+		Success: proto.Bool(true),
+	}, nil
 }
 
 // pack returns a TLV style byte slice encoding the size of the payload, the RPC type
 // and the RPC data
-func (r *rpc) pack(typ internal.RPCType, b []byte) []byte {
+func pack(typ internal.RPCType, b []byte) []byte {
 	buf := u64tob(uint64(len(b)) + 8)
 	buf = append(buf, u64tob(uint64(typ))...)
 	buf = append(buf, b...)
@@ -351,6 +400,29 @@ func (r *rpc) join(localAddr, remoteAddr string) (*JoinResult, error) {
 	}
 }
 
+// enableRaft attempts to promote a node at remoteAddr using localAddr as the current
+// node's cluster address
+func (r *rpc) enableRaft(addr string, peers []string) error {
+	req := &internal.PromoteRaftRequest{
+		Addr:      proto.String(addr),
+		RaftNodes: peers,
+	}
+
+	resp, err := r.call(addr, req)
+	if err != nil {
+		return err
+	}
+
+	switch t := resp.(type) {
+	case *internal.PromoteRaftResponse:
+		return nil
+	case *internal.ErrorResponse:
+		return fmt.Errorf("rpc failed: %s", t.GetHeader().GetError())
+	default:
+		return fmt.Errorf("rpc failed: unknown response type: %v", t.String())
+	}
+}
+
 // call sends an encoded request to the remote leader and returns
 // an encoded response value.
 func (r *rpc) call(dest string, req proto.Message) (proto.Message, error) {
@@ -361,6 +433,8 @@ func (r *rpc) call(dest string, req proto.Message) (proto.Message, error) {
 		rpcType = internal.RPCType_Join
 	case *internal.FetchDataRequest:
 		rpcType = internal.RPCType_FetchData
+	case *internal.PromoteRaftRequest:
+		rpcType = internal.RPCType_PromoteRaft
 	default:
 		return nil, fmt.Errorf("unknown rpc request type: %v", t)
 	}
@@ -384,7 +458,7 @@ func (r *rpc) call(dest string, req proto.Message) (proto.Message, error) {
 	}
 
 	// Write request size & bytes.
-	if _, err := conn.Write(r.pack(rpcType, b)); err != nil {
+	if _, err := conn.Write(pack(rpcType, b)); err != nil {
 		return nil, fmt.Errorf("write %v rpc: %s", rpcType, err)
 	}
 
@@ -417,6 +491,8 @@ func (r *rpc) call(dest string, req proto.Message) (proto.Message, error) {
 		resp = &internal.FetchDataResponse{}
 	case internal.RPCType_Error:
 		resp = &internal.ErrorResponse{}
+	case internal.RPCType_PromoteRaft:
+		resp = &internal.PromoteRaftResponse{}
 	default:
 		return nil, fmt.Errorf("unknown rpc response type: %v", rpcType)
 	}
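The pack/readMessage split above centers on a small TLV frame: an 8-byte big-endian size (payload length + 8), an 8-byte RPC type word, then the payload. A self-contained sketch of that framing follows; u64tob is reimplemented here as an assumption about its behavior, since its definition lives elsewhere in the package.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// u64tob is assumed to be a big-endian uint64 encoder elsewhere in meta.
func u64tob(v uint64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, v)
	return b
}

// pack mirrors the now package-level pack(): size covers the type word + payload.
func pack(typ uint64, b []byte) []byte {
	buf := u64tob(uint64(len(b)) + 8)
	buf = append(buf, u64tob(typ)...)
	buf = append(buf, b...)
	return buf
}

func main() {
	frame := pack(4, []byte("payload")) // 4 = PromoteRaft in the RPCType enum

	// Reading side, as in readMessage: size first, then type + payload.
	r := bytes.NewReader(frame)
	var sz uint64
	if err := binary.Read(r, binary.BigEndian, &sz); err != nil {
		panic(err)
	}
	body := make([]byte, sz)
	if _, err := r.Read(body); err != nil {
		panic(err)
	}
	typ := binary.BigEndian.Uint64(body[:8]) // first 8 bytes are the type
	fmt.Println(typ, string(body[8:]))       // 4 payload
}
```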
@@ -240,3 +240,9 @@ func (f *fakeStore) WaitForDataChanged() error {
 	<-f.blockChan
 	return nil
 }
+func (f *fakeStore) enableLocalRaft() error {
+	return nil
+}
+func (f *fakeStore) SetPeers(addrs []string) error {
+	return nil
+}
@@ -35,6 +35,7 @@ type raftState interface {
 	lastIndex() uint64
 	apply(b []byte) error
 	snapshot() error
+	isLocal() bool
 }
 
 // localRaft is a consensus strategy that uses a local raft implementation for
@@ -114,14 +115,15 @@ func (r *localRaft) open() error {
 	config.ElectionTimeout = s.ElectionTimeout
 	config.LeaderLeaseTimeout = s.LeaderLeaseTimeout
 	config.CommitTimeout = s.CommitTimeout
+	// Since we actually never call `removePeer` this is safe.
+	// If in the future we decide to call remove peer we have to re-evaluate how to handle this
+	config.ShutdownOnRemove = false
 
 	// If no peers are set in the config or there is one and we are it, then start as a single server.
 	if len(s.peers) <= 1 {
 		config.EnableSingleNode = true
 		// Ensure we can always become the leader
 		config.DisableBootstrapAfterElect = false
-		// Don't shutdown raft automatically if we renamed our hostname back to a previous name
-		config.ShutdownOnRemove = false
 	}
 
 	// Build raft layer to multiplex listener.
@@ -152,7 +154,7 @@ func (r *localRaft) open() error {
 	// is difficult to resolve automatically because we need to have all the raft peers agree on the current members
 	// of the cluster before we can change them.
 	if len(peers) > 0 && !raft.PeerContained(peers, s.RemoteAddr.String()) {
-		s.Logger.Printf("%v is not in the list of raft peers. Please update %v/peers.json on all raft nodes to have the same contents.", s.RemoteAddr.String(), s.Path())
+		s.Logger.Printf("%s is not in the list of raft peers. Please update %v/peers.json on all raft nodes to have the same contents.", s.RemoteAddr.String(), s.Path())
 		return fmt.Errorf("peers out of sync: %v not in %v", s.RemoteAddr.String(), peers)
 	}
 
@@ -350,6 +352,10 @@ func (r *localRaft) isLeader() bool {
 	return r.raft.State() == raft.Leader
 }
 
+func (r *localRaft) isLocal() bool {
+	return true
+}
+
 // remoteRaft is a consensus strategy that uses a remote raft cluster for
 // consensus operations.
 type remoteRaft struct {
@@ -468,6 +474,10 @@ func (r *remoteRaft) isLeader() bool {
 	return false
 }
 
+func (r *remoteRaft) isLocal() bool {
+	return false
+}
+
 func (r *remoteRaft) lastIndex() uint64 {
 	return r.store.cachedData().Index
 }
@@ -14,6 +14,7 @@ import (
 	"net"
 	"os"
 	"path/filepath"
+	"sort"
 	"strconv"
 	"strings"
 	"sync"
@@ -123,6 +124,10 @@ type Store struct {
 	// Returns an error if the password is invalid or a hash cannot be generated.
 	hashPassword HashPasswordFn
 
+	// raftPromotionEnabled determines if non-raft nodes should be automatically
+	// promoted to a raft node to self-heal a raft cluster
+	raftPromotionEnabled bool
+
 	Logger *log.Logger
 }
 
@@ -145,6 +150,7 @@ func NewStore(c *Config) *Store {
 
 		clusterTracingEnabled: c.ClusterTracing,
 		retentionAutoCreate:   c.RetentionAutoCreate,
+		raftPromotionEnabled:  c.RaftPromotionEnabled,
 
 		HeartbeatTimeout: time.Duration(c.HeartbeatTimeout),
 		ElectionTimeout:  time.Duration(c.ElectionTimeout),
@@ -255,7 +261,17 @@ func (s *Store) Open() error {
 	// Wait for a leader to be elected so we know the raft log is loaded
 	// and up to date
 	<-s.ready
-	return s.WaitForLeader(0)
+	if err := s.WaitForLeader(0); err != nil {
+		return err
+	}
+
+	if s.raftPromotionEnabled {
+		s.wg.Add(1)
+		s.Logger.Printf("spun up monitoring for %d", s.NodeID())
+		go s.monitorPeerHealth()
+	}
+
+	return nil
 }
 
 // syncNodeInfo continuously tries to update the current nodes hostname
@@ -415,6 +431,77 @@ func (s *Store) changeState(state raftState) error {
 	return nil
 }
 
+// monitorPeerHealth periodically checks if we have a node that can be promoted to a
+// raft peer to fill any missing slots.
+// This function runs in a separate goroutine.
+func (s *Store) monitorPeerHealth() {
+	defer s.wg.Done()
+
+	ticker := time.NewTicker(1 * time.Second)
+	defer ticker.Stop()
+
+	for {
+		// Wait for next tick or timeout.
+		select {
+		case <-ticker.C:
+		case <-s.closing:
+			return
+		}
+		if err := s.promoteNodeToPeer(); err != nil {
+			s.Logger.Printf("error promoting node to raft peer: %s", err)
+		}
+	}
+}
+
+func (s *Store) promoteNodeToPeer() error {
+	// Only do this if you are the leader
+	if !s.IsLeader() {
+		return nil
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	peers, err := s.raftState.peers()
+	if err != nil {
+		return err
+	}
+
+	nodes := s.data.Nodes
+	var nonraft NodeInfos
+	for _, n := range nodes {
+		if contains(peers, n.Host) {
+			continue
+		}
+		nonraft = append(nonraft, n)
+	}
+
+	// Check to see if any action is required or possible
+	if len(peers) >= 3 || len(nonraft) == 0 {
+		return nil
+	}
+
+	// Sort the nodes
+	sort.Sort(nonraft)
+
+	// Get the lowest node for a deterministic outcome
+	n := nonraft[0]
+	// Set peers on the leader now to the new peers
+	if err := s.AddPeer(n.Host); err != nil {
+		return fmt.Errorf("unable to add raft peer %s on leader: %s", n.Host, err)
+	}
+
+	// add node to peers list
+	peers = append(peers, n.Host)
+	if err := s.rpc.enableRaft(n.Host, peers); err != nil {
+		return fmt.Errorf("error notifying raft peer: %s", err)
+	}
+	s.Logger.Printf("promoted nodeID %d, host %s to raft peer", n.ID, n.Host)
+
+	return nil
+}
+
 // openRaft initializes the raft store.
 func (s *Store) openRaft() error {
 	return s.raftState.open()
@@ -604,6 +691,13 @@ func (s *Store) IsLeader() bool {
 	return s.raftState.isLeader()
 }
 
+// IsLocal returns true if the store is currently participating in local raft.
+func (s *Store) IsLocal() bool {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	return s.raftState.isLocal()
+}
+
 // Leader returns what the store thinks is the current leader. An empty
 // string indicates no leader exists.
 func (s *Store) Leader() string {
@@ -754,11 +848,11 @@ func (s *Store) serveRPCListener() {
 		if err != nil {
 			if strings.Contains(err.Error(), "connection closed") {
 				return
-			} else {
-				s.Logger.Printf("temporary accept error: %s", err)
-				continue
 			}
+
+			s.Logger.Printf("temporary accept error: %s", err)
+			continue
 		}
-		}
 
 		// Handle connection in a separate goroutine.
 		s.wg.Add(1)
@@ -1199,6 +1293,8 @@ func (s *Store) ShardGroupByTimestamp(database, policy string, timestamp time.Ti
 	return
 }
 
+// ShardOwner looks up for a specific shard and return the shard group information
+// related with the shard.
 func (s *Store) ShardOwner(shardID uint64) (database, policy string, sgi *ShardGroupInfo) {
 	s.read(func(data *Data) error {
 		for _, dbi := range data.Databases {
@@ -2211,8 +2307,13 @@ type RetentionPolicyUpdate struct {
 	ReplicaN *int
 }
 
+// SetName sets the RetentionPolicyUpdate.Name
 func (rpu *RetentionPolicyUpdate) SetName(v string) { rpu.Name = &v }
+
+// SetDuration sets the RetentionPolicyUpdate.Duration
 func (rpu *RetentionPolicyUpdate) SetDuration(v time.Duration) { rpu.Duration = &v }
+
+// SetReplicaN sets the RetentionPolicyUpdate.ReplicaN
 func (rpu *RetentionPolicyUpdate) SetReplicaN(v int) { rpu.ReplicaN = &v }
 
 // assert will panic with a given formatted message if the given condition is false.
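promoteNodeToPeer relies on sort.Sort(nonraft), so NodeInfos must satisfy sort.Interface. That implementation is outside this diff; the following is a hedged sketch of what it presumably looks like, ordering by node ID to match the "lowest node for a deterministic outcome" comment.

```go
// Sketch only: NodeInfos' sort.Interface implementation is not part of this
// diff; ordering by ID is an assumption inferred from the surrounding comments.
package main

import (
	"fmt"
	"sort"
)

type NodeInfo struct {
	ID   uint64
	Host string
}

type NodeInfos []NodeInfo

func (n NodeInfos) Len() int           { return len(n) }
func (n NodeInfos) Swap(i, j int)      { n[i], n[j] = n[j], n[i] }
func (n NodeInfos) Less(i, j int) bool { return n[i].ID < n[j].ID }

func main() {
	nonraft := NodeInfos{{ID: 5, Host: "e"}, {ID: 4, Host: "d"}}
	sort.Sort(nonraft)
	fmt.Println(nonraft[0].ID) // 4: every leader picks the same candidate
}
```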
@@ -1056,6 +1056,79 @@ func TestCluster_Restart(t *testing.T) {
 	wg.Wait()
 }
 
+// Ensure a multi-node cluster can start, join the cluster, and that the first
+// three members are raft nodes; then add a 4th non-raft node.
+// Remove a raft node and ensure the 4th promotes to raft.
+func TestCluster_ReplaceRaft(t *testing.T) {
+	t.Parallel()
+	// Start a single node.
+	c := MustOpenCluster(1)
+	defer c.Close()
+
+	// Check that the node becomes leader.
+	if s := c.Leader(); s == nil {
+		t.Fatal("no leader found")
+	}
+
+	// Add 2 more nodes.
+	for i := 0; i < 2; i++ {
+		if err := c.Join(); err != nil {
+			t.Fatalf("failed to join cluster: %v", err)
+		}
+	}
+
+	// sleep to let them become raft
+	time.Sleep(time.Second)
+
+	// ensure we have 3 raft nodes
+	for _, s := range c.Stores {
+		if !s.IsLocal() {
+			t.Fatalf("node %d is not a local raft instance.", s.NodeID())
+		}
+	}
+
+	// ensure all the nodes see the same metastore data
+	assertDatabaseReplicated(t, c)
+
+	// Add another node
+	if err := c.Join(); err != nil {
+		t.Fatalf("failed to join cluster: %v", err)
+	}
+
+	var leader, follower *Store
+
+	// find a non-leader node
+	for _, s := range c.Stores {
+		if s.IsLeader() {
+			leader = s
+		}
+		// Find any follower to remove
+		if !s.IsLeader() && s.IsLocal() {
+			follower = s
+		}
+		if leader != nil && follower != nil {
+			break
+		}
+	}
+
+	// drop the node
+	if err := leader.DeleteNode(follower.NodeID(), true); err != nil {
+		t.Fatal(err)
+	}
+	if err := c.Remove(follower.NodeID()); err != nil {
+		t.Fatal(err)
+	}
+
+	// sleep to let them become raft
+	time.Sleep(1 * time.Second)
+
+	// ensure we have 3 raft nodes
+	for _, s := range c.Stores {
+		if !s.IsLocal() {
+			t.Fatalf("node %d is not a local raft instance.", s.NodeID())
+		}
+	}
+}
+
 // Store is a test wrapper for meta.Store.
 type Store struct {
 	*meta.Store
@@ -1156,6 +1229,7 @@ func NewConfig(path string) *meta.Config {
 		ElectionTimeout:      toml.Duration(500 * time.Millisecond),
 		LeaderLeaseTimeout:   toml.Duration(500 * time.Millisecond),
 		CommitTimeout:        toml.Duration(5 * time.Millisecond),
+		RaftPromotionEnabled: true,
 	}
 }
 
@@ -1210,6 +1284,17 @@ func (c *Cluster) Join() error {
 	return nil
 }
 
+func (c *Cluster) Remove(nodeID uint64) error {
+	for i, s := range c.Stores {
+		if s.NodeID() == nodeID {
+			// This could hang for a variety of reasons, so don't wait for it
+			go s.Close()
+			c.Stores = append(c.Stores[:i], c.Stores[i+1:]...)
+		}
+	}
+	return nil
+}
+
 // Open opens and initializes all stores in the cluster.
 func (c *Cluster) Open() error {
 	if err := func() error {
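The test harness enables the new option directly via meta.Config. Below is a sketch of driving it from TOML instead; the `raft-promotion-enabled` key name is inferred from the field name and is an assumption, since the Config struct tags are not part of this diff.

```go
// Sketch: decoding the assumed meta config key. The struct below stands in for
// meta.Config, whose real definition (and toml tag) is outside this diff.
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type Config struct {
	RaftPromotionEnabled bool `toml:"raft-promotion-enabled"` // assumed key name
}

func main() {
	var c Config
	if _, err := toml.Decode(`raft-promotion-enabled = true`, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.RaftPromotionEnabled) // true
}
```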
@@ -63,7 +63,7 @@ if [ -z "$FPM" ]; then
     FPM=`which fpm`
 fi
 
-GO_VERSION="go1.5.1"
+GO_VERSION="go1.4.2"
 GOPATH_INSTALL=
 BINS=(
     influxd
@@ -267,7 +267,7 @@ do_build() {
     fi
 
     date=`date -u --iso-8601=seconds`
-    go install $RACE -a -ldflags="-X main.version=$version -X main.branch=$branch -X main.commit=$commit -X main.buildTime=$date" ./...
+    go install $RACE -a -ldflags="-X main.version $version -X main.branch $branch -X main.commit $commit -X main.buildTime $date" ./...
     if [ $? -ne 0 ]; then
         echo "Build failed, unable to create package -- aborting"
         cleanup_exit 1
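The ldflags change follows the toolchain revert noted in the changelog: Go 1.5 accepts the `-X name=value` form, while Go 1.4.2 expects the older space-separated `-X name value` form. Both overwrite package-level string variables at link time; a hypothetical sketch of the kind of declarations these flags target (the real cmd/influxd variables may differ):

```go
// Sketch of the variables the -X flags target; names match the flags in
// package.sh, but the actual cmd/influxd declarations may differ.
package main

import "fmt"

// Overwritten at link time, e.g. with Go 1.4.2:
//   go install -ldflags="-X main.version 0.9.5" ./...
var (
	version   = "unknown"
	branch    = "unknown"
	commit    = "unknown"
	buildTime = "unknown"
)

func main() {
	fmt.Printf("InfluxDB v%s (git: %s %s, built %s)\n", version, branch, commit, buildTime)
}
```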
Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/config.go (generated, vendored; 15 changes)

@@ -51,9 +51,9 @@ const (
 
 // Config represents the configuration for Graphite endpoints.
 type Config struct {
-	Enabled bool `toml:"enabled"`
 	BindAddress string `toml:"bind-address"`
 	Database string `toml:"database"`
+	Enabled bool `toml:"enabled"`
 	Protocol string `toml:"protocol"`
 	BatchSize int `toml:"batch-size"`
 	BatchPending int `toml:"batch-pending"`
@@ -65,19 +65,6 @@ type Config struct {
 	UDPReadBuffer int `toml:"udp-read-buffer"`
 }
 
-func NewConfig() Config {
-	return Config{
-		BindAddress:      DefaultBindAddress,
-		Database:         DefaultDatabase,
-		Protocol:         DefaultProtocol,
-		BatchSize:        DefaultBatchSize,
-		BatchPending:     DefaultBatchPending,
-		BatchTimeout:     toml.Duration(DefaultBatchTimeout),
-		ConsistencyLevel: DefaultConsistencyLevel,
-		Separator:        DefaultSeparator,
-	}
-}
-
 // WithDefaults takes the given config and returns a new config with any required
 // default values set.
 func (c *Config) WithDefaults() *Config {
@@ -47,7 +47,7 @@ type Config struct {
 
 func NewConfig() Config {
 	return Config{
-		Enabled:        true,
+		Enabled:        false,
 		MaxSize:        DefaultMaxSize,
 		MaxAge:         toml.Duration(DefaultMaxAge),
 		RetryRateLimit: DefaultRetryRateLimit,
@@ -53,3 +53,21 @@ purge-interval = "1h"
 	}
 
 }
+
+func TestDefaultDisabled(t *testing.T) {
+	// Parse empty configuration.
+	var c hh.Config
+	if _, err := toml.Decode(``, &c); err != nil {
+		t.Fatal(err)
+	}
+
+	if exp := false; c.Enabled == true {
+		t.Fatalf("unexpected default Enabled value: got %v, exp %v", c.Enabled, exp)
+	}
+
+	// Default configuration.
+	c = hh.NewConfig()
+	if exp := false; c.Enabled == true {
+		t.Fatalf("unexpected default enabled value: got %v, exp %v", c.Enabled, exp)
+	}
+}
@@ -7,15 +7,9 @@ import (
 )
 
 const (
-	// DefaultBindAddress is the default binding interface if none is specified.
-	DefaultBindAddress = ":8089"
-
 	// DefaultDatabase is the default database for UDP traffic.
 	DefaultDatabase = "udp"
 
-	// DefaultRetentionPolicy is the default retention policy used for writes.
-	DefaultRetentionPolicy = ""
-
 	// DefaultBatchSize is the default UDP batch size.
 	DefaultBatchSize = 5000
 
@@ -51,17 +45,6 @@ type Config struct {
 	BatchTimeout toml.Duration `toml:"batch-timeout"`
 }
 
-func NewConfig() Config {
-	return Config{
-		BindAddress:     DefaultBindAddress,
-		Database:        DefaultDatabase,
-		RetentionPolicy: DefaultRetentionPolicy,
-		BatchSize:       DefaultBatchSize,
-		BatchPending:    DefaultBatchPending,
-		BatchTimeout:    toml.Duration(DefaultBatchTimeout),
-	}
-}
-
 // WithDefaults takes the given config and returns a new config with any required
 // default values set.
 func (c *Config) WithDefaults() *Config {
Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine/tsm1/DESIGN.md (generated, vendored; new file, 411 lines)

@@ -0,0 +1,411 @@
# File Structure

A TSM file is composed of four sections: header, blocks, index and footer.

```
┌────────┬────────────────────────────────────┬─────────────┬──────────────┐
│ Header │               Blocks               │    Index    │    Footer    │
│5 bytes │              N bytes               │   N bytes   │   4 bytes    │
└────────┴────────────────────────────────────┴─────────────┴──────────────┘
```

The header is composed of a magic number, to identify the file type, and a version number.

```
┌───────────────────┐
│      Header       │
├─────────┬─────────┤
│  Magic  │ Version │
│ 4 bytes │ 1 byte  │
└─────────┴─────────┘
```

Blocks are sequences of a block CRC32 and data. The block data is opaque to the file. The CRC32 is used for recovery, to ensure blocks have not been corrupted due to bugs outside of our control. The length of the blocks is stored in the index.

```
┌───────────────────────────────────────────────────────────┐
│                          Blocks                           │
├───────────────────┬───────────────────┬───────────────────┤
│      Block 1      │      Block 2      │      Block N      │
├─────────┬─────────┼─────────┬─────────┼─────────┬─────────┤
│   CRC   │  Data   │   CRC   │  Data   │   CRC   │  Data   │
│ 4 bytes │ N bytes │ 4 bytes │ N bytes │ 4 bytes │ N bytes │
└─────────┴─────────┴─────────┴─────────┴─────────┴─────────┘
```
|
|
||||||
|
Following the blocks is the index for the blocks in the file. The index is composed of a sequence of index entries ordered lexicographically by key and then by time. Each index entry starts with a key length and key followed by a count of the number of blocks in the file. Each block entry is composed of the min and max time for the block, the offset into the file where the block is located and the the size of the block.
|
||||||
|
|
||||||
|
The index structure can provide efficient access to all blocks as well as the ability to determine the cost associated with acessing given key. Given a key and timestamp, we know exactly which file contains the block for that timestamp as well as where that block resides and how much data to read to retrieve the block. If we know we need to read all or multiple blocks in a file, we can use the size to determine how much to read in a given IO.
|
||||||
|
|
||||||
|
_TBD: The block length stored in the block data could probably be dropped since we store it in the index._
|
||||||
|
|
||||||
|
```
|
||||||
|
┌──────────────────────────────────────────────────────────────────────────┐
|
||||||
|
│ Index │
|
||||||
|
├─────────┬─────────┬───────┬─────────┬─────────┬─────────┬─────────┬──────┤
|
||||||
|
│ Key Len │ Key │ Count │Min Time │Max Time │ Offset │ Size │ ... │
|
||||||
|
│ 2 bytes │ N bytes │2 bytes│ 8 bytes │ 8 bytes │ 8 bytes │ 4 bytes │ │
|
||||||
|
└─────────┴─────────┴───────┴─────────┴─────────┴─────────┴─────────┴──────┘
|
||||||
|
```
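
Each block entry in this layout is a fixed 28 bytes: two 8-byte nanosecond times, an 8-byte file offset and a 4-byte size. As a minimal sketch (not the vendored implementation itself, which appears later in this commit; imports `encoding/binary` and `time` assumed), decoding one entry with the big-endian encoding used throughout might look like:

```
// decodeEntry unpacks one 28-byte index block entry: min time, max time,
// file offset and block size, all big-endian.
func decodeEntry(b []byte) (minTime, maxTime time.Time, offset int64, size uint32) {
	minTime = time.Unix(0, int64(binary.BigEndian.Uint64(b[0:8])))
	maxTime = time.Unix(0, int64(binary.BigEndian.Uint64(b[8:16])))
	offset = int64(binary.BigEndian.Uint64(b[16:24]))
	size = binary.BigEndian.Uint32(b[24:28])
	return minTime, maxTime, offset, size
}
```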

The last section is the footer that stores the offset of the start of the index.

```
┌─────────┐
│ Footer  │
├─────────┤
│Index Ofs│
│ 8 bytes │
└─────────┘
```
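
Since the footer is a fixed 8 bytes at the end of the file, a reader can locate the index without scanning. A minimal sketch, assuming an `io.ReadSeeker` over the file (this mirrors how the reader implementation later in this commit initializes itself):

```
// indexOffset seeks to the 8-byte footer and returns the absolute
// position where the index section begins.
func indexOffset(r io.ReadSeeker) (int64, error) {
	if _, err := r.Seek(-8, os.SEEK_END); err != nil {
		return 0, err
	}
	b := make([]byte, 8)
	if _, err := io.ReadFull(r, b); err != nil {
		return 0, err
	}
	return int64(binary.BigEndian.Uint64(b)), nil
}
```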

# File System Layout

The file system is organized as a directory per shard, where each shard is an integer number. Within the shard dir exists a set of other directories and files:

* wal dir - Contains a set of numerically increasing WAL segment files named ######.wal. The wal dir will be a separate location from the TSM data files so that different storage types can be used if necessary.
* TSM files - A set of numerically increasing TSM files containing compressed series data.
* Tombstone files - Files named after the corresponding TSM file as #####.tombstone. These contain measurement and series keys that have been deleted. They are removed during compactions.

# Data Flow

Writes are appended to the current WAL segment and are also added to the Cache. Each WAL segment is size bounded and rolls over to a new file after it fills up. The Cache is also size bounded; older entries are evicted as new entries are added to maintain the size. The WAL and Cache are separate entities and do not interact with each other. The Engine coordinates the writes to both.

When WAL segments fill up and are closed, the Compactor reads the WAL entries and combines them with one or more existing TSM files. This process runs continuously until all WAL files are compacted and there is a minimum number of TSM files. As each TSM file is completed, it is loaded and referenced by the FileStore.

Queries are executed by constructing Cursors for keys. The Cursors iterate over slices of Values. When the current Values are exhausted, a cursor requests the next set of Values from the Engine. The Engine returns a slice of Values by querying the FileStore and Cache. The Values in the Cache are overlaid on top of the values returned from the FileStore. The FileStore reads and decodes blocks of Values according to the index for the file.

Updates (writing a newer value for a point that already exists) occur as normal writes. Since cached values overwrite existing values, newer writes take precedence.

Deletes occur by writing a delete entry for the measurement or series to the WAL and then updating the Cache and FileStore. The Cache evicts all relevant entries. The FileStore writes a tombstone file for each TSM file that contains relevant data. These tombstone files are used at startup time to ignore blocks as well as during compactions to remove deleted entries.

# Compactions

Compactions are a serial and continuously running process that iteratively optimizes the storage for queries. Specifically, compaction does the following:

* Converts closed WAL files into TSM files and removes the closed WAL files
* Combines smaller TSM files into larger ones to improve compression ratios
* Rewrites existing files that contain series data that has been deleted
* Rewrites existing files that contain writes with more recent data to ensure a point exists in only one TSM file.

The compaction algorithm is continuously running and always selects files to compact based on a priority.

1. If there are closed WAL files, the 5 oldest WAL segments are added to the set of compaction files.
2. If any TSM files contain points with older timestamps that also exist in the WAL files, those TSM files are added to the compaction set.
3. If any TSM files have a tombstone marker, those TSM files are added to the compaction set.

The compaction is used to generate a set of SeriesIterators that return a sequence of `key`, `Values` where each `key` returned is lexicographically greater than the previous one. The iterators are ordered such that WAL iterators will override any values returned by the TSM file iterators. WAL iterators read and cache the WAL segment so that deletes later in the log can be processed correctly. TSM file iterators use the tombstone files to ensure that deleted series are not returned during iteration. As each key is processed, the Values slice is grown, sorted, and then written to a new block in the new TSM file. The blocks can be split based on the number of points or the size of the block. If the total size of the current TSM file would exceed the maximum file size, a new file is created.

Deletions can occur while a new file is being written. Since the new TSM file is not yet complete, a tombstone would not be written for it. This could result in deleted values getting written into a new file. To prevent this, if a compaction is running and a delete occurs, the current compaction is aborted and a new compaction is started.

When all files are processed and successfully written, completion checkpoint markers are created and files are renamed. The engine then notifies the Cache of the last written timestamp, which is used by the Cache to know what entries can be evicted in the future.

This process then runs again until there are no more WAL files and the minimum number of TSM files exists that are also under the maximum file size.

# WAL

Currently, there is a WAL per shard. This means all the writes in a WAL segment are for the given shard. It also means that writes across a lot of shards append to many files, which might result in more disk IO due to seeking to the end of multiple files.

Two options are being considered:

## WAL per Shard

This is the current behavior of the WAL. This option is conceptually easier to reason about. For example, compactions that read in multiple WAL segments are assured that all the WAL entries pertain to the current shard. If it completes a compaction, it is safe to remove the WAL segment. It is also easier to deal with shard deletions as all the WAL segments can be dropped along with the other shard files.

The drawback of this option is the potential for turning sequential write IO into random IO in the presence of multiple shards and writes to many different shards.

## Single WAL

Using a single WAL adds some complexity to compactions and deletions. Compactions will need to either sort all the WAL entries in a segment by shard first and then run compactions on each shard, or the compactor needs to be able to compact multiple shards concurrently while ensuring points in existing TSM files in different shards remain separate.

Deletions would not be able to reclaim WAL segments immediately as in the case where there is a WAL per shard. Similarly, a compaction of a WAL segment that contains writes for a deleted shard would need to be dropped.

Currently, we are moving towards a single WAL implementation.

# TSM File Index

Each TSM file contains a full index of the blocks contained within the file. The existing index structure is designed to allow for a binary search across the index to find the starting block for a key. We would then seek to that start key and sequentially scan each block to find the location of a timestamp.

Some issues with the existing structure are that seeking to a given timestamp for a key has an unknown cost. This can cause variability in read performance that would be very difficult to fix. Another issue is that startup times for loading a TSM file would grow in proportion to the number and size of TSM files on disk, since we would need to scan the entire file to find all keys contained in the file. This could be addressed by using a separate index-like file or by changing the index structure.

We've chosen to update the block index structure to ensure a TSM file is fully self-contained, supports consistent IO characteristics for sequential and random accesses, and provides an efficient load time regardless of file size. The implications of these changes are that the index is slightly larger and we need to be able to search the index despite each entry being variably sized.

The following are some alternative design options to handle the cases where the index is too large to fit in memory. We are currently planning to use an indirect MMAP indexing approach for loaded TSM files.

### Indirect MMAP Indexing

One option is to MMAP the index into memory and record the pointers to the start of each index entry in a slice. When searching for a given key, the pointers are used to perform a binary search on the underlying mmap data. When the matching key is found, the block entries can be loaded and searched, or a subsequent binary search on the blocks can be performed.

A variation of this can also be done without MMAPs by seeking and reading in the file. The underlying file cache will still be utilized in this approach as well.

As an example, if we have an index structure in memory such as:

```
┌────────────────────────────────────────────────────────────────────┐
│                               Index                                │
├─┬──────────────────────┬──┬───────────────────────┬───┬────────────┘
│0│                      │62│                       │145│
├─┴───────┬─────────┬────┼──┴──────┬─────────┬──────┼───┴─────┬──────┐
│Key 1 Len│   Key   │... │Key 2 Len│  Key 2  │ ...  │  Key 3  │ ...  │
│ 2 bytes │ N bytes │    │ 2 bytes │ N bytes │      │ 2 bytes │      │
└─────────┴─────────┴────┴─────────┴─────────┴──────┴─────────┴──────┘
```

We would build an `offsets` slice where each element points to the byte location of a key in the index slice.

```
┌────────────────────────────────────────────────────────────────────┐
│                              Offsets                               │
├────┬────┬────┬─────────────────────────────────────────────────────┘
│ 0  │ 62 │145 │
└────┴────┴────┘
```

Using this offsets slice we can find `Key 2` by doing a binary search over the offsets slice. Instead of comparing the value in the offsets (e.g. `62`), we use that as an index into the underlying index to retrieve the key at position `62` and perform our comparisons with that.

When we have identified the correct position in the index for a given key, we could perform another binary search or a linear scan. This should be fast as well since each index entry is 28 bytes and all contiguous in memory.

The size of the offsets slice would be proportional to the number of unique series. If we limit file sizes to 4GB, we would use 4 bytes for each pointer.
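
A minimal sketch of the offset-based key lookup, assuming `index` holds the raw index bytes and `offsets` holds the byte positions of each key length (the vendored `indirectIndex` later in this commit takes the same approach):

```
// searchKey returns the position in offsets of the first key >= key,
// comparing against the keys stored in the underlying index bytes.
func searchKey(index []byte, offsets []int32, key string) int {
	return sort.Search(len(offsets), func(i int) bool {
		ofs := offsets[i]
		keyLen := int32(binary.BigEndian.Uint16(index[ofs : ofs+2]))
		k := string(index[ofs+2 : ofs+2+keyLen])
		return k >= key
	})
}
```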

### LRU/Lazy Load

A second option could be to have the index work as a memory bounded, lazy-load style cache. When a cache miss occurs, the index structure is scanned to find the key, and the entries are loaded and added to the cache, which causes the least-recently used entries to be evicted.

### Key Compression

Another option is to compress keys using a key-specific dictionary encoding. For example,

```
cpu,host=server1 value=1
cpu,host=server2 value=2
memory,host=server1 value=3
```

could be compressed by expanding the key into its respective parts: measurement, tag keys, tag values and fields. For each part a unique number is assigned. e.g.

Measurements
```
cpu    = 1
memory = 2
```

Tag Keys
```
host = 1
```

Tag Values
```
server1 = 1
server2 = 2
```

Fields
```
value = 1
```

Using this encoding dictionary, the string keys could be converted to sequences of integers:

```
cpu,host=server1 value=1    --> 1,1,1,1
cpu,host=server2 value=2    --> 1,1,2,1
memory,host=server1 value=3 --> 2,1,1,1
```

These sequences of small integers can then be compressed further using a bit-packed format such as Simple9 or Simple8b. The resulting byte slices would be a multiple of 4 or 8 bytes (using Simple9/Simple8b respectively) which could be used as the key (string).
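
A minimal sketch of the dictionary step only (the Simple9/Simple8b bit packing is omitted), using hypothetical dictionaries assumed to have been built in a prior pass over the keys:

```
// encodeKey maps the parts of a parsed series key (one tag pair here, for
// brevity) to their dictionary IDs, producing the integer sequences above.
func encodeKey(measurement, tagKey, tagValue, field string,
	measurements, tagKeys, tagValues, fields map[string]uint64) []uint64 {
	return []uint64{
		measurements[measurement],
		tagKeys[tagKey],
		tagValues[tagValue],
		fields[field],
	}
}
```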

### Separate Index

Another option might be to have a separate index file (BoltDB) that serves as the storage for the `FileIndex` and is transient. This index would be recreated at startup and updated at compaction time.

# Components

These are some of the high-level components and their responsibilities. These ideas are preliminary.

## WAL

* Append-only log composed of fixed-size segment files.
* Writes are appended to the current segment.
* Rolls over to a new segment after filling the current segment.
* Closed segments are never modified and are used for startup and recovery as well as compactions.
* There is a single WAL for the store as opposed to a WAL per shard.

## Compactor

* Continuously running, iterative file storage optimizer.
* Takes closed WAL files and existing TSM files and combines them into one or more new TSM files.

## Cache

* Holds recently written series data.
* Has a max memory limit.
* When the limit is crossed, old entries are expired according to the last compaction checkpoint. Entries written before the last checkpoint time can be evicted.
* If a write comes in and points after the checkpoint are evicted but there is still not enough room to hold the write, the write returns an error.

## Engine

* Maintains references to Cache, FileStore, WAL, etc.
* Creates cursors.
* Receives writes, coordinates queries.
* Hides underlying files and types from clients.

## Cursor

* Iterates forward or reverse for a given key.
* Requests values from the Engine for a key and timestamp.
* Has no knowledge of TSM files or WAL - delegates to the Engine to request the next set of Values.

## FileStore

* Manages TSM files.
* Maintains the file indexes and references to active files.
* Opening a TSM file entails reading in and adding the index section to the `FileIndex`. The block data is then MMAPed up to the index offset to avoid having the index in memory twice.

## FileIndex

* Provides location information to a file and block for a given key and timestamp.

## Interfaces

```
// SeriesIterator returns the key and []Value such that a key is only returned
// once and subsequent calls to Next() do not return the same key twice.
type SeriesIterator interface {
	Next() (key string, values []Value, err error)
}
```

## Types

_NOTE: the actual func names are to illustrate the type of functionality the type is responsible for._

```
// TSMWriter writes sets of keys and Values to a TSM file.
type TSMWriter struct{}

func (t *TSMWriter) Write(key string, values []Value) error
func (t *TSMWriter) Close() error
```

```
// WALIterator returns the key and []Values for a set of WAL segment files.
type WALIterator struct {
	Files []*os.File
}

func (r *WALIterator) Next() (key string, values []Value, err error)
```

```
// TSMIterator returns the key and values from a TSM file.
type TSMIterator struct{}

func (r *TSMIterator) Next() (key string, values []Value, err error)
```

```
type Compactor struct{}

func (c *Compactor) Compact(iters ...SeriesIterator) error
```

```
type Engine struct {
	wal       *WAL
	cache     *Cache
	fileStore *FileStore
	compactor *Compactor
}

func (e *Engine) ValuesBefore(key string, timestamp time.Time) ([]Value, error)
func (e *Engine) ValuesAfter(key string, timestamp time.Time) ([]Value, error)
```

```
type Cursor struct {
	engine *Engine
}
...
```

```
// FileStore maintains references to the active TSM files.
type FileStore struct{}

func (f *FileStore) ValuesBefore(key string, timestamp time.Time) ([]Value, error)
func (f *FileStore) ValuesAfter(key string, timestamp time.Time) ([]Value, error)
```

```
type FileIndex struct{}

// Location returns the file and offset of the block in that file that
// contains the requested key and timestamp.
func (f *FileIndex) Location(key string, timestamp time.Time) (*os.File, uint64, error)
```

```
type Cache struct{}

func (c *Cache) Write(key string, values []Value) error
```

```
type WAL struct{}

func (w *WAL) Write(key string, values []Value)
func (w *WAL) ClosedSegments() ([]*os.File, error)
```

# Concerns

## Performance

There are several categories of performance this design is concerned with:

* Write Throughput/Latency
* Query Throughput/Latency
* Startup time
* Compaction Throughput/Latency
* Memory Usage

### Writes

Write throughput is bounded by the time to process the write on the CPU (parsing, sorting, etc.), adding to and evicting from the Cache, and appending the write to the WAL. The first two items are CPU bound and can be tuned and optimized if they become a bottleneck. The WAL write can be tuned such that in the worst case every write requires at least 2 IOPS (write + fsync), or batched so that multiple writes are queued and fsync'd in sizes matching one or more disk blocks. Performing more work with each IO will improve throughput.

Write latency is minimal for the WAL write since there are no seeks. The latency is bounded by the time to complete any write and fsync calls.
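
As a sketch of the batching described above (the `segment` type here is hypothetical, not the vendored WAL), several queued entries can share a single write and fsync:

```
type segment struct {
	f *os.File
}

// appendBatch writes all queued entries with one write call and one fsync,
// amortizing the per-IO cost across the batch.
func (s *segment) appendBatch(entries [][]byte) error {
	var buf bytes.Buffer
	for _, e := range entries {
		buf.Write(e)
	}
	if _, err := s.f.Write(buf.Bytes()); err != nil {
		return err
	}
	return s.f.Sync() // one fsync for the whole batch
}
```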

### Queries

Query throughput is directly related to how many blocks can be read in a period of time. The index structure contains enough information to determine if one or multiple blocks can be read in a single IO.

Query latency is determined by how long it takes to find and read the relevant blocks. The in-memory index structure contains the offsets and sizes of all blocks for a key. This allows every block to be read in 2 IOPS (seek + read) regardless of the position, structure or size of the file.

### Startup

Startup time is proportional to the number of WAL files, TSM files and tombstone files. WAL files can be read and processed in large batches using the WALIterators. TSM files require reading the index block into memory (5 IOPS/file). Tombstone files are expected to be small and infrequent and would require approximately 2 IOPS/file.

### Compactions

Compactions are IO intensive in that they may need to read multiple large TSM files to rewrite them. The throughput of compactions (MB/s) as well as the latency for each compaction is important to keep consistent even as data sizes grow.

The performance of compactions also has an effect on what data is visible during queries. If the Cache fills up and evicts old entries faster than the compactions can process old WAL files, queries could return gaps until compactions catch up.

To address these concerns, compactions prioritize old WAL files over optimizing storage/compression to avoid data being hidden in overload situations. This also accounts for the fact that shards will eventually become cold for writes, so existing data will be able to be optimized. To maintain consistent performance, the number of each type of file processed as well as the size of each file processed is bounded.

### Memory Footprint

The memory footprint should not grow unbounded due to additional files or series keys of large sizes or numbers. Some options for addressing this concern are covered in the [Design Options] section.

## Concurrency

The main concern with concurrency is that reads and writes should not block each other. Writes add entries to the Cache and append entries to the WAL. During queries, the contention points will be the Cache and existing TSM files. Since the Cache and TSM file data is only accessed through the engine by the cursors, several strategies can be used to improve concurrency.

1. Cache series data can be returned to cursors as a copy (see the sketch after this list). Since cache entries are evicted on writes, cursor iteration and writes to the same series could block each other. Iterating over copies of the values can relieve some of this contention.
2. TSM data values returned by the engine are new references to Values and not access to the actual TSM files. This means that the `Engine`, through the `FileStore`, can limit contention.
3. Compactions are the only place where new TSM files are added and removed. Since this is a serial, continuously running process, file contention is minimized.
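
A sketch of the first strategy, assuming a `Cache` guarded by a `sync.RWMutex` with a `store map[string][]Value` (hypothetical fields, not the vendored type):

```
// Values returns a copy of the cached values for key so cursor iteration
// does not contend with concurrent writes and evictions.
func (c *Cache) Values(key string) []Value {
	c.mu.RLock()
	defer c.mu.RUnlock()
	cp := make([]Value, len(c.store[key]))
	copy(cp, c.store[key])
	return cp
}
```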

## Robustness

The two robustness concerns considered by this design are writes filling the cache and crash recovery.

Writes filling up the cache faster than the WAL segments can be processed result in the oldest entries being evicted from the cache. This is the normal operation for the cache. Old entries are always evicted to make room for new entries. In the case where WAL segments are slow to be processed, writes are not blocked or errored, so timeouts should not occur due to IO issues. A side effect of this is that queries for recent data will always be served from memory. The size of the in-memory cache can also be tuned so that if IO does become a bottleneck, the window of time for queries with recent data can be tuned.

Crash recovery is handled by using copy-on-write style updates along with checkpoint marker files. Existing data is never updated. Updates and deletes to existing data are recorded as new changes and processed at compaction and query time.

666 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine/tsm1/data_file.go generated vendored Normal file
@@ -0,0 +1,666 @@
package tsm1

/*
A TSM file is composed of four sections: header, blocks, index and footer.

┌────────┬────────────────────────────────────┬─────────────┬──────────────┐
│ Header │               Blocks               │    Index    │    Footer    │
│5 bytes │              N bytes               │   N bytes   │   4 bytes    │
└────────┴────────────────────────────────────┴─────────────┴──────────────┘

The header is composed of a magic number to identify the file type and a version
number.

┌───────────────────┐
│      Header       │
├─────────┬─────────┤
│  Magic  │ Version │
│ 4 bytes │ 1 byte  │
└─────────┴─────────┘

Blocks are sequences of pairs of CRC32 and data. The block data is opaque to the
file. The CRC32 is used for block level error detection. The length of the blocks
is stored in the index.

┌───────────────────────────────────────────────────────────┐
│                          Blocks                           │
├───────────────────┬───────────────────┬───────────────────┤
│      Block 1      │      Block 2      │      Block N      │
├─────────┬─────────┼─────────┬─────────┼─────────┬─────────┤
│   CRC   │  Data   │   CRC   │  Data   │   CRC   │  Data   │
│ 4 bytes │ N bytes │ 4 bytes │ N bytes │ 4 bytes │ N bytes │
└─────────┴─────────┴─────────┴─────────┴─────────┴─────────┘

Following the blocks is the index for the blocks in the file. The index is
composed of a sequence of index entries ordered lexicographically by key and
then by time. Each index entry starts with a key length and key, followed by a
count of the number of blocks in the file. Each block entry is composed of
the min and max time for the block, the offset into the file where the block
is located and the size of the block.

The index structure can provide efficient access to all blocks as well as the
ability to determine the cost associated with accessing a given key. Given a key
and timestamp, we can determine whether a file contains the block for that
timestamp as well as where that block resides and how much data to read to
retrieve the block. If we know we need to read all or multiple blocks in a
file, we can use the size to determine how much to read in a given IO.

┌──────────────────────────────────────────────────────────────────────────┐
│                                   Index                                  │
├─────────┬─────────┬───────┬─────────┬─────────┬─────────┬─────────┬──────┤
│ Key Len │   Key   │ Count │Min Time │Max Time │ Offset  │  Size   │ ...  │
│ 2 bytes │ N bytes │2 bytes│ 8 bytes │ 8 bytes │ 8 bytes │ 4 bytes │      │
└─────────┴─────────┴───────┴─────────┴─────────┴─────────┴─────────┴──────┘

The last section is the footer that stores the offset of the start of the index.

┌─────────┐
│ Footer  │
├─────────┤
│Index Ofs│
│ 8 bytes │
└─────────┘
*/

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
	"io"
	"os"
	"sort"
	"time"
)

const (
	// MagicNumber is written as the first 4 bytes of a data file to
	// identify the file as a tsm1 formatted file
	MagicNumber uint32 = 0x16D116D1

	Version byte = 1

	indexEntrySize = 28
)

// TSMWriter writes TSM formatted key and values.
type TSMWriter interface {
	// Write writes a new block for key containing the values. Writes append
	// blocks in the order that the Write function is called. The caller is
	// responsible for ensuring keys and blocks are sorted appropriately.
	// Values are encoded as a full block. The caller is responsible for
	// ensuring a fixed number of values are encoded in each block as well as
	// ensuring the Values are sorted. The first and last timestamp values are
	// used as the minimum and maximum values for the index entry.
	Write(key string, values Values) error

	// Close finishes the TSM write streams and writes the index.
	Close() error
}

// TSMIndex represents the index section of a TSM file. The index records all
// blocks, their locations, sizes, min and max times.
type TSMIndex interface {

	// Add records a new block entry for a key in the index.
	Add(key string, minTime, maxTime time.Time, offset int64, size uint32)

	// Entries returns all index entries for a key.
	Entries(key string) []*IndexEntry

	// Entry returns the index entry for the specified key and timestamp. If no entry
	// matches the key and timestamp, nil is returned.
	Entry(key string, timestamp time.Time) *IndexEntry

	// MarshalBinary returns a byte slice encoded version of the index.
	MarshalBinary() ([]byte, error)

	// UnmarshalBinary populates an index from an encoded byte slice
	// representation of an index.
	UnmarshalBinary(b []byte) error
}

// IndexEntry is the index information for a given block in a TSM file.
type IndexEntry struct {

	// The min and max time of all points stored in the block.
	MinTime, MaxTime time.Time

	// The absolute position in the file where this block is located.
	Offset int64

	// The size in bytes of the block in the file.
	Size uint32
}

func (e *IndexEntry) UnmarshalBinary(b []byte) error {
	if len(b) != indexEntrySize {
		return fmt.Errorf("unmarshalBinary: short buf: %v != %v", indexEntrySize, len(b))
	}
	e.MinTime = time.Unix(0, int64(btou64(b[:8])))
	e.MaxTime = time.Unix(0, int64(btou64(b[8:16])))
	e.Offset = int64(btou64(b[16:24]))
	e.Size = btou32(b[24:28])
	return nil
}

// Contains returns true if this IndexEntry may contain values for the given time.
// The min and max times are inclusive.
func (e *IndexEntry) Contains(t time.Time) bool {
	return (e.MinTime.Equal(t) || e.MinTime.Before(t)) &&
		(e.MaxTime.Equal(t) || e.MaxTime.After(t))
}

func NewDirectIndex() TSMIndex {
	return &directIndex{
		blocks: map[string]indexEntries{},
	}
}

// directIndex is a simple in-memory index implementation for a TSM file. The full index
// must fit in memory.
type directIndex struct {
	blocks map[string]indexEntries
}

func (d *directIndex) Add(key string, minTime, maxTime time.Time, offset int64, size uint32) {
	d.blocks[key] = append(d.blocks[key], &IndexEntry{
		MinTime: minTime,
		MaxTime: maxTime,
		Offset:  offset,
		Size:    size,
	})
}

func (d *directIndex) Entries(key string) []*IndexEntry {
	return d.blocks[key]
}

func (d *directIndex) Entry(key string, t time.Time) *IndexEntry {
	entries := d.Entries(key)
	for _, entry := range entries {
		if entry.Contains(t) {
			return entry
		}
	}
	return nil
}

func (d *directIndex) addEntries(key string, entries indexEntries) {
	d.blocks[key] = append(d.blocks[key], entries...)
}

func (d *directIndex) Write(w io.Writer) error {
	b, err := d.MarshalBinary()
	if err != nil {
		return fmt.Errorf("write: marshal error: %v", err)
	}

	// Write out the index bytes
	_, err = w.Write(b)
	if err != nil {
		return fmt.Errorf("write: writer error: %v", err)
	}
	return nil
}

func (d *directIndex) MarshalBinary() ([]byte, error) {
	// Index blocks are written sorted by key
	var keys []string
	for k := range d.blocks {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	// Buffer to build up the index and write in bulk
	var b []byte

	// For each key, individual entries are sorted by time
	for _, key := range keys {
		entries := d.blocks[key]
		sort.Sort(entries)

		// Append the key length and key
		b = append(b, u16tob(uint16(len(key)))...)
		b = append(b, key...)

		// Append the index block count
		b = append(b, u16tob(uint16(len(entries)))...)

		// Append each index entry for all blocks for this key
		for _, entry := range entries {
			b = append(b, u64tob(uint64(entry.MinTime.UnixNano()))...)
			b = append(b, u64tob(uint64(entry.MaxTime.UnixNano()))...)
			b = append(b, u64tob(uint64(entry.Offset))...)
			b = append(b, u32tob(entry.Size)...)
		}
	}
	return b, nil
}

func (d *directIndex) UnmarshalBinary(b []byte) error {
	var pos int
	for pos < len(b) {
		n, key, err := d.readKey(b[pos:])
		if err != nil {
			return fmt.Errorf("readIndex: read key error: %v", err)
		}

		pos += n
		n, entries, err := d.readEntries(b[pos:])
		if err != nil {
			return fmt.Errorf("readIndex: read entries error: %v", err)
		}

		pos += n
		d.addEntries(key, entries)
	}
	return nil
}

func (d *directIndex) readKey(b []byte) (n int, key string, err error) {
	// 2 byte size of key
	n, size := 2, int(btou16(b[:2]))

	// N byte key
	key = string(b[n : n+size])
	n += len(key)
	return
}

func (d *directIndex) readEntries(b []byte) (n int, entries indexEntries, err error) {
	// 2 byte count of index entries
	n, count := 2, int(btou16(b[:2]))

	for i := 0; i < count; i++ {
		ie := &IndexEntry{}
		if err := ie.UnmarshalBinary(b[i*indexEntrySize+2 : i*indexEntrySize+2+indexEntrySize]); err != nil {
			return 0, nil, fmt.Errorf("readEntries: unmarshal error: %v", err)
		}
		entries = append(entries, ie)
		n += indexEntrySize
	}
	return
}

// indirectIndex is a TSMIndex that uses a raw byte slice representation of an index. This
// implementation can be used for indexes that may be MMAPed into memory.
type indirectIndex struct {
	// indirectIndex works as follows. Assuming we have an index structure in memory as
	// the diagram below:
	//
	// ┌────────────────────────────────────────────────────────────────────┐
	// │                               Index                                │
	// ├─┬──────────────────────┬──┬───────────────────────┬───┬────────────┘
	// │0│                      │62│                       │145│
	// ├─┴───────┬─────────┬────┼──┴──────┬─────────┬──────┼───┴─────┬──────┐
	// │Key 1 Len│   Key   │... │Key 2 Len│  Key 2  │ ...  │  Key 3  │ ...  │
	// │ 2 bytes │ N bytes │    │ 2 bytes │ N bytes │      │ 2 bytes │      │
	// └─────────┴─────────┴────┴─────────┴─────────┴──────┴─────────┴──────┘
	//
	// We would build an `offsets` slice where each element points to the byte location
	// for the first key in the index slice.
	//
	// ┌────────────────────────────────────────────────────────────────────┐
	// │                              Offsets                               │
	// ├────┬────┬────┬─────────────────────────────────────────────────────┘
	// │ 0  │ 62 │145 │
	// └────┴────┴────┘
	//
	// Using this offset slice we can find `Key 2` by doing a binary search
	// over the offsets slice. Instead of comparing the value in the offsets
	// (e.g. `62`), we use that as an index into the underlying index to
	// retrieve the key at position `62` and perform our comparisons with that.
	//
	// When we have identified the correct position in the index for a given
	// key, we could perform another binary search or a linear scan. This
	// should be fast as well since each index entry is 28 bytes and all
	// contiguous in memory. The current implementation uses a linear scan since the
	// number of block entries is expected to be < 100 per key.

	// b is the underlying index byte slice. This could be a copy on the heap or an MMAP
	// slice reference
	b []byte

	// offsets contains the positions in b for each key. It points to the 2 byte length of
	// key.
	offsets []int32
}

func NewIndirectIndex() TSMIndex {
	return &indirectIndex{}
}

// Add records a new block entry for a key in the index.
func (d *indirectIndex) Add(key string, minTime, maxTime time.Time, offset int64, size uint32) {
	panic("unsupported operation")
}

// Entries returns all index entries for a key.
func (d *indirectIndex) Entries(key string) []*IndexEntry {
	// We use a binary search across our indirect offsets (pointers to all the keys
	// in the index slice).
	i := sort.Search(len(d.offsets), func(i int) bool {
		// i is the position in offsets we are at, so get the offset it points to
		offset := d.offsets[i]

		// It's pointing to the start of the key which is a 2 byte length
		keyLen := int32(btou16(d.b[offset : offset+2]))

		// Now get the actual key bytes and convert to string
		k := string(d.b[offset+2 : offset+2+keyLen])

		// See if it matches
		return key == k || k > key
	})

	// See if we might have found the right index
	if i < len(d.offsets) {
		ofs := d.offsets[i]
		n, k, err := d.readKey(d.b[ofs:])
		if err != nil {
			panic(fmt.Sprintf("error reading key: %v", err))
		}

		// The search may have returned an i == 0 which could indicate that the value
		// searched should be inserted at position 0. Make sure the key in the index
		// matches the search value.
		if k != key {
			return nil
		}

		// Read and return all the entries
		ofs += int32(n)
		_, entries, err := d.readEntries(d.b[ofs:])
		if err != nil {
			panic(fmt.Sprintf("error reading entries: %v", err))
		}
		return entries
	}

	// The key is not in the index. i is the index where it would be inserted.
	return nil
}

// Entry returns the index entry for the specified key and timestamp. If no entry
// matches the key and timestamp, nil is returned.
func (d *indirectIndex) Entry(key string, timestamp time.Time) *IndexEntry {
	entries := d.Entries(key)
	for _, entry := range entries {
		if entry.Contains(timestamp) {
			return entry
		}
	}
	return nil
}

// MarshalBinary returns a byte slice encoded version of the index.
func (d *indirectIndex) MarshalBinary() ([]byte, error) {
	return d.b, nil
}

// UnmarshalBinary populates an index from an encoded byte slice
// representation of an index.
func (d *indirectIndex) UnmarshalBinary(b []byte) error {
	// Keep a reference to the actual index bytes
	d.b = b

	// To create our "indirect" index, we need to find the location of all the keys in
	// the raw byte slice. The keys are listed once each (in sorted order). Following
	// each key is a time ordered list of index entry blocks for that key. The loop below
	// basically skips across the slice, keeping track of the counter when we are at a key
	// field.
	var i int32
	for i < int32(len(b)) {
		d.offsets = append(d.offsets, i)
		keyLen := int32(btou16(b[i : i+2]))
		// Skip to the start of the key
		i += 2

		// Skip over the key
		i += keyLen

		// Count of all the index blocks for this key
		count := int32(btou16(b[i : i+2]))

		// Skip the count bytes
		i += 2

		// Skip over all the blocks
		i += count * indexEntrySize
	}
	return nil
}

func (d *indirectIndex) readKey(b []byte) (n int, key string, err error) {
	// 2 byte size of key
	n, size := 2, int(btou16(b[:2]))

	// N byte key
	key = string(b[n : n+size])
	n += len(key)
	return
}

func (d *indirectIndex) readEntries(b []byte) (n int, entries indexEntries, err error) {
	// 2 byte count of index entries
	n, count := 2, int(btou16(b[:2]))

	for i := 0; i < count; i++ {
		ie := &IndexEntry{}
		if err := ie.UnmarshalBinary(b[i*indexEntrySize+2 : i*indexEntrySize+2+indexEntrySize]); err != nil {
			return 0, nil, fmt.Errorf("readEntries: unmarshal error: %v", err)
		}
		entries = append(entries, ie)
		n += indexEntrySize
	}
	return
}

// tsmWriter writes keys and values in the TSM format
type tsmWriter struct {
	w     io.Writer
	index TSMIndex
	n     int64
}

func NewTSMWriter(w io.Writer) (TSMWriter, error) {
	n, err := w.Write(append(u32tob(MagicNumber), Version))
	if err != nil {
		return nil, err
	}

	index := &directIndex{
		blocks: map[string]indexEntries{},
	}

	return &tsmWriter{w: w, index: index, n: int64(n)}, nil
}

func (t *tsmWriter) Write(key string, values Values) error {
	block, err := values.Encode(nil)
	if err != nil {
		return err
	}

	checksum := crc32.ChecksumIEEE(block)

	n, err := t.w.Write(append(u32tob(checksum), block...))
	if err != nil {
		return err
	}

	// Record this block in index
	t.index.Add(key, values[0].Time(), values[len(values)-1].Time(), t.n, uint32(n))

	// Increment file position pointer
	t.n += int64(n)
	return nil
}

func (t *tsmWriter) Close() error {
	indexPos := t.n

	// Generate the index bytes
	b, err := t.index.MarshalBinary()
	if err != nil {
		return err
	}

	// Write the index followed by index position
	_, err = t.w.Write(append(b, u64tob(uint64(indexPos))...))
	if err != nil {
		return err
	}

	return nil
}

type tsmReader struct {
	r                    io.ReadSeeker
	indexStart, indexEnd int64
	index                TSMIndex
}

func NewTSMReader(r io.ReadSeeker) (*tsmReader, error) {
	t := &tsmReader{r: r}
	if err := t.init(); err != nil {
		return nil, err
	}

	return t, nil
}

func (t *tsmReader) init() error {
	// Determine the reader's size
	size, err := t.r.Seek(0, os.SEEK_END)
	if err != nil {
		return fmt.Errorf("init: failed to seek: %v", err)
	}

	t.indexEnd = size - 8

	// Seek to index location pointer
	_, err = t.r.Seek(-8, os.SEEK_END)
	if err != nil {
		return fmt.Errorf("init: failed to seek to index ptr: %v", err)
	}

	// Read the absolute position of the start of the index
	b := make([]byte, 8)
	_, err = t.r.Read(b)
	if err != nil {
		return fmt.Errorf("init: failed to read index ptr: %v", err)
	}

	t.indexStart = int64(btou64(b))

	_, err = t.r.Seek(t.indexStart, os.SEEK_SET)
	if err != nil {
		return fmt.Errorf("init: failed to seek to index: %v", err)
	}

	b = make([]byte, t.indexEnd-t.indexStart)
	t.index = &directIndex{
		blocks: map[string]indexEntries{},
	}
	_, err = t.r.Read(b)
	if err != nil {
		return fmt.Errorf("init: read index: %v", err)
	}

	if err := t.index.UnmarshalBinary(b); err != nil {
		return fmt.Errorf("init: unmarshal error: %v", err)
	}

	return nil
}

func (t *tsmReader) Read(key string, timestamp time.Time) ([]Value, error) {
	block := t.index.Entry(key, timestamp)
	if block == nil {
		return nil, nil
	}

	// TODO: remove this allocation
	b := make([]byte, 16*1024)
	_, err := t.r.Seek(block.Offset, os.SEEK_SET)
	if err != nil {
		return nil, err
	}

	if int(block.Size) > len(b) {
		b = make([]byte, block.Size)
	}

	n, err := t.r.Read(b)
	if err != nil {
		return nil, err
	}

	// TODO: Validate checksum
	var values []Value
	err = DecodeBlock(b[4:n], &values)
	if err != nil {
		return nil, err
	}

	return values, nil
}

// ReadAll returns all values for a key in all blocks.
func (t *tsmReader) ReadAll(key string) ([]Value, error) {
	var values []Value
	blocks := t.index.Entries(key)
	if len(blocks) == 0 {
		return values, nil
	}

	var temp []Value
	// TODO: we can determine the max block size when loading the file; create/re-use
	// a reader level buf then.
	b := make([]byte, 16*1024)
	for _, block := range blocks {
		_, err := t.r.Seek(block.Offset, os.SEEK_SET)
		if err != nil {
			return nil, err
		}

		if int(block.Size) > len(b) {
			b = make([]byte, block.Size)
		}

		n, err := t.r.Read(b)
		if err != nil {
			return nil, err
		}

		// TODO: Validate checksum
		temp = temp[:0]
		err = DecodeBlock(b[4:n], &temp)
		if err != nil {
			return nil, err
		}
		values = append(values, temp...)
	}

	return values, nil
}

type indexEntries []*IndexEntry

func (a indexEntries) Len() int      { return len(a) }
func (a indexEntries) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a indexEntries) Less(i, j int) bool {
	return a[i].MinTime.UnixNano() < a[j].MinTime.UnixNano()
}

func u16tob(v uint16) []byte {
	b := make([]byte, 2)
	binary.BigEndian.PutUint16(b, v)
	return b
}

func btou16(b []byte) uint16 {
	return uint16(binary.BigEndian.Uint16(b))
}

426 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine/tsm1/data_file_test.go generated vendored Normal file
@@ -0,0 +1,426 @@
package tsm1_test

import (
	"bytes"
	"encoding/binary"
	"testing"
	"time"

	"github.com/influxdb/influxdb/tsdb/engine/tsm1"
)

func TestTSMWriter_Write_Empty(t *testing.T) {
	var b bytes.Buffer
	w, err := tsm1.NewTSMWriter(&b)
	if err != nil {
		t.Fatalf("unexpected error creating writer: %v", err)
	}

	if err := w.Close(); err != nil {
		t.Fatalf("unexpected error closing: %v", err)
	}

	if got, exp := len(b.Bytes()), 5; got < exp {
		t.Fatalf("file size mismatch: got %v, exp %v", got, exp)
	}
	if got := binary.BigEndian.Uint32(b.Bytes()[0:4]); got != tsm1.MagicNumber {
		t.Fatalf("magic number mismatch: got %v, exp %v", got, tsm1.MagicNumber)
	}
}

func TestTSMWriter_Write_Single(t *testing.T) {
	var b bytes.Buffer
	w, err := tsm1.NewTSMWriter(&b)
	if err != nil {
		t.Fatalf("unexpected error creating writer: %v", err)
	}

	values := []tsm1.Value{tsm1.NewValue(time.Unix(0, 0), 1.0)}
	if err := w.Write("cpu", values); err != nil {
		t.Fatalf("unexpected error writing: %v", err)
	}
	if err := w.Close(); err != nil {
		t.Fatalf("unexpected error closing: %v", err)
	}

	if got, exp := len(b.Bytes()), 5; got < exp {
		t.Fatalf("file size mismatch: got %v, exp %v", got, exp)
	}
	if got := binary.BigEndian.Uint32(b.Bytes()[0:4]); got != tsm1.MagicNumber {
		t.Fatalf("magic number mismatch: got %v, exp %v", got, tsm1.MagicNumber)
	}

	r, err := tsm1.NewTSMReader(bytes.NewReader(b.Bytes()))
	if err != nil {
		t.Fatalf("unexpected error creating reader: %v", err)
	}

	readValues, err := r.ReadAll("cpu")
	if err != nil {
		t.Fatalf("unexpected error reading: %v", err)
	}

	if len(readValues) != len(values) {
		t.Fatalf("read values length mismatch: got %v, exp %v", len(readValues), len(values))
	}

	for i, v := range values {
		if v.Value() != readValues[i].Value() {
			t.Fatalf("read value mismatch(%d): got %v, exp %v", i, readValues[i].Value(), v.Value())
		}
	}
}

func TestTSMWriter_Write_Multiple(t *testing.T) {
	var b bytes.Buffer
	w, err := tsm1.NewTSMWriter(&b)
	if err != nil {
		t.Fatalf("unexpected error creating writer: %v", err)
	}

	var data = []struct {
		key    string
		values []tsm1.Value
	}{
		{"cpu", []tsm1.Value{tsm1.NewValue(time.Unix(0, 0), 1.0)}},
		{"mem", []tsm1.Value{tsm1.NewValue(time.Unix(1, 0), 2.0)}},
	}

	for _, d := range data {
		if err := w.Write(d.key, d.values); err != nil {
			t.Fatalf("unexpected error writing: %v", err)
		}
	}

	if err := w.Close(); err != nil {
		t.Fatalf("unexpected error closing: %v", err)
	}

	r, err := tsm1.NewTSMReader(bytes.NewReader(b.Bytes()))
	if err != nil {
		t.Fatalf("unexpected error creating reader: %v", err)
	}

	for _, d := range data {
		readValues, err := r.ReadAll(d.key)
		if err != nil {
			t.Fatalf("unexpected error reading: %v", err)
		}

		if exp := len(d.values); exp != len(readValues) {
			t.Fatalf("read values length mismatch: got %v, exp %v", len(readValues), exp)
		}

		for i, v := range d.values {
			if v.Value() != readValues[i].Value() {
				t.Fatalf("read value mismatch(%d): got %v, exp %v", i, readValues[i].Value(), v.Value())
			}
		}
	}
}
func TestTSMWriter_Write_MultipleKeyValues(t *testing.T) {
|
||||||
|
var b bytes.Buffer
|
||||||
|
w, err := tsm1.NewTSMWriter(&b)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error creating writer: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var data = []struct {
|
||||||
|
key string
|
||||||
|
values []tsm1.Value
|
||||||
|
}{
|
||||||
|
{"cpu", []tsm1.Value{
|
||||||
|
tsm1.NewValue(time.Unix(0, 0), 1.0),
|
||||||
|
tsm1.NewValue(time.Unix(1, 0), 2.0)},
|
||||||
|
},
|
||||||
|
{"mem", []tsm1.Value{
|
||||||
|
tsm1.NewValue(time.Unix(0, 0), 1.5),
|
||||||
|
tsm1.NewValue(time.Unix(1, 0), 2.5)},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, d := range data {
|
||||||
|
if err := w.Write(d.key, d.values); err != nil {
|
||||||
|
t.Fatalf("unexpeted error writing: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := w.Close(); err != nil {
|
||||||
|
t.Fatalf("unexpeted error closing: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
r, err := tsm1.NewTSMReader(bytes.NewReader(b.Bytes()))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error created reader: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, d := range data {
|
||||||
|
readValues, err := r.ReadAll(d.key)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpeted error readin: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if exp := len(d.values); exp != len(readValues) {
|
||||||
|
t.Fatalf("read values length mismatch: got %v, exp %v", len(readValues), exp)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, v := range d.values {
|
||||||
|
if v.Value() != readValues[i].Value() {
|
||||||
|
t.Fatalf("read value mismatch(%d): got %v, exp %d", i, readValues[i].Value(), v.Value())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
// Tests that keys written in reverse order can still be read back.
func TestTSMWriter_Write_ReverseKeys(t *testing.T) {
	var b bytes.Buffer
	w, err := tsm1.NewTSMWriter(&b)
	if err != nil {
		t.Fatalf("unexpected error creating writer: %v", err)
	}

	var data = []struct {
		key    string
		values []tsm1.Value
	}{
		{"mem", []tsm1.Value{
			tsm1.NewValue(time.Unix(0, 0), 1.5),
			tsm1.NewValue(time.Unix(1, 0), 2.5)},
		},
		{"cpu", []tsm1.Value{
			tsm1.NewValue(time.Unix(0, 0), 1.0),
			tsm1.NewValue(time.Unix(1, 0), 2.0)},
		},
	}

	for _, d := range data {
		if err := w.Write(d.key, d.values); err != nil {
			t.Fatalf("unexpected error writing: %v", err)
		}
	}

	if err := w.Close(); err != nil {
		t.Fatalf("unexpected error closing: %v", err)
	}

	r, err := tsm1.NewTSMReader(bytes.NewReader(b.Bytes()))
	if err != nil {
		t.Fatalf("unexpected error creating reader: %v", err)
	}

	for _, d := range data {
		readValues, err := r.ReadAll(d.key)
		if err != nil {
			t.Fatalf("unexpected error reading: %v", err)
		}

		if exp := len(d.values); exp != len(readValues) {
			t.Fatalf("read values length mismatch: got %v, exp %v", len(readValues), exp)
		}

		for i, v := range d.values {
			if v.Value() != readValues[i].Value() {
				t.Fatalf("read value mismatch(%d): got %v, exp %v", i, readValues[i].Value(), v.Value())
			}
		}
	}
}

// Tests that writing the same key multiple times appends the values
// so that they can all be read back.
func TestTSMWriter_Write_SameKey(t *testing.T) {
	var b bytes.Buffer
	w, err := tsm1.NewTSMWriter(&b)
	if err != nil {
		t.Fatalf("unexpected error creating writer: %v", err)
	}

	var data = []struct {
		key    string
		values []tsm1.Value
	}{
		{"cpu", []tsm1.Value{
			tsm1.NewValue(time.Unix(0, 0), 1.0),
			tsm1.NewValue(time.Unix(1, 0), 2.0)},
		},
		{"cpu", []tsm1.Value{
			tsm1.NewValue(time.Unix(2, 0), 3.0),
			tsm1.NewValue(time.Unix(3, 0), 4.0)},
		},
	}

	for _, d := range data {
		if err := w.Write(d.key, d.values); err != nil {
			t.Fatalf("unexpected error writing: %v", err)
		}
	}

	if err := w.Close(); err != nil {
		t.Fatalf("unexpected error closing: %v", err)
	}

	r, err := tsm1.NewTSMReader(bytes.NewReader(b.Bytes()))
	if err != nil {
		t.Fatalf("unexpected error creating reader: %v", err)
	}

	values := append(data[0].values, data[1].values...)

	readValues, err := r.ReadAll("cpu")
	if err != nil {
		t.Fatalf("unexpected error reading: %v", err)
	}

	if exp := len(values); exp != len(readValues) {
		t.Fatalf("read values length mismatch: got %v, exp %v", len(readValues), exp)
	}

	for i, v := range values {
		if v.Value() != readValues[i].Value() {
			t.Fatalf("read value mismatch(%d): got %v, exp %v", i, readValues[i].Value(), v.Value())
		}
	}
}

// Tests that calling Read returns all the values for the block matching
// the key and timestamp.
func TestTSMWriter_Read_Multiple(t *testing.T) {
	var b bytes.Buffer
	w, err := tsm1.NewTSMWriter(&b)
	if err != nil {
		t.Fatalf("unexpected error creating writer: %v", err)
	}

	var data = []struct {
		key    string
		values []tsm1.Value
	}{
		{"cpu", []tsm1.Value{
			tsm1.NewValue(time.Unix(0, 0), 1.0),
			tsm1.NewValue(time.Unix(1, 0), 2.0)},
		},
		{"cpu", []tsm1.Value{
			tsm1.NewValue(time.Unix(2, 0), 3.0),
			tsm1.NewValue(time.Unix(3, 0), 4.0)},
		},
	}

	for _, d := range data {
		if err := w.Write(d.key, d.values); err != nil {
			t.Fatalf("unexpected error writing: %v", err)
		}
	}

	if err := w.Close(); err != nil {
		t.Fatalf("unexpected error closing: %v", err)
	}

	r, err := tsm1.NewTSMReader(bytes.NewReader(b.Bytes()))
	if err != nil {
		t.Fatalf("unexpected error creating reader: %v", err)
	}

	for _, values := range data {
		// Try the first timestamp
		readValues, err := r.Read("cpu", values.values[0].Time())
		if err != nil {
			t.Fatalf("unexpected error reading: %v", err)
		}

		if exp := len(values.values); exp != len(readValues) {
			t.Fatalf("read values length mismatch: got %v, exp %v", len(readValues), exp)
		}

		for i, v := range values.values {
			if v.Value() != readValues[i].Value() {
				t.Fatalf("read value mismatch(%d): got %v, exp %v", i, readValues[i].Value(), v.Value())
			}
		}

		// Try the last timestamp too
		readValues, err = r.Read("cpu", values.values[1].Time())
		if err != nil {
			t.Fatalf("unexpected error reading: %v", err)
		}

		if exp := len(values.values); exp != len(readValues) {
			t.Fatalf("read values length mismatch: got %v, exp %v", len(readValues), exp)
		}

		for i, v := range values.values {
			if v.Value() != readValues[i].Value() {
				t.Fatalf("read value mismatch(%d): got %v, exp %v", i, readValues[i].Value(), v.Value())
			}
		}
	}
}

func TestIndirectIndex_Entries(t *testing.T) {
	index := tsm1.NewDirectIndex()
	index.Add("cpu", time.Unix(0, 0), time.Unix(1, 0), 10, 100)
	index.Add("cpu", time.Unix(2, 0), time.Unix(3, 0), 20, 200)
	index.Add("mem", time.Unix(0, 0), time.Unix(1, 0), 10, 100)

	b, err := index.MarshalBinary()
	if err != nil {
		t.Fatalf("unexpected error marshaling index: %v", err)
	}

	indirect := tsm1.NewIndirectIndex()
	if err := indirect.UnmarshalBinary(b); err != nil {
		t.Fatalf("unexpected error unmarshaling index: %v", err)
	}

	exp := index.Entries("cpu")
	entries := indirect.Entries("cpu")

	if got, exp := len(entries), len(exp); got != exp {
		t.Fatalf("entries length mismatch: got %v, exp %v", got, exp)
	}

	for i, exp := range exp {
		got := entries[i]
		if exp.MinTime != got.MinTime {
			t.Fatalf("minTime mismatch: got %v, exp %v", got.MinTime, exp.MinTime)
		}

		if exp.MaxTime != got.MaxTime {
			t.Fatalf("maxTime mismatch: got %v, exp %v", got.MaxTime, exp.MaxTime)
		}

		if exp.Size != got.Size {
			t.Fatalf("size mismatch: got %v, exp %v", got.Size, exp.Size)
		}
		if exp.Offset != got.Offset {
			t.Fatalf("offset mismatch: got %v, exp %v", got.Offset, exp.Offset)
		}
	}
}

func TestIndirectIndex_Entries_NonExistent(t *testing.T) {
	index := tsm1.NewDirectIndex()
	index.Add("cpu", time.Unix(0, 0), time.Unix(1, 0), 10, 100)
	index.Add("cpu", time.Unix(2, 0), time.Unix(3, 0), 20, 200)

	b, err := index.MarshalBinary()
	if err != nil {
		t.Fatalf("unexpected error marshaling index: %v", err)
	}

	indirect := tsm1.NewIndirectIndex()
	if err := indirect.UnmarshalBinary(b); err != nil {
		t.Fatalf("unexpected error unmarshaling index: %v", err)
	}

	// mem has not been added to the index, so both indexes should return
	// no entries for it.
	exp := index.Entries("mem")
	entries := indirect.Entries("mem")

	if got, exp := len(entries), len(exp); got != exp || exp != 0 {
		t.Fatalf("entries length mismatch: got %v, exp %v", got, exp)
	}
}

@@ -82,10 +82,6 @@ const (

 	// MAP_POPULATE is for the mmap syscall. For some reason this isn't defined in golang's syscall
 	MAP_POPULATE = 0x8000
-
-	// magicNumber is written as the first 4 bytes of a data file to
-	// identify the file as a tsm1 formatted file
-	magicNumber uint32 = 0x16D116D1
 )

 // Ensure Engine implements the interface.

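For context on the constant kept above: MAP_POPULATE asks the kernel to pre-fault a mapping's pages at mmap time, so early reads don't stall on page faults. Below is a minimal sketch of mapping a file read-only with that flag on Linux, assuming the same 0x8000 value; the file name is illustrative and not from this commit.

package main

import (
	"fmt"
	"os"
	"syscall"
)

// MAP_POPULATE pre-faults the mapped pages (Linux-only). The value matches
// the constant block above, since older syscall packages don't define it.
const MAP_POPULATE = 0x8000

func main() {
	f, err := os.Open("example.tsm") // illustrative file name
	if err != nil {
		panic(err)
	}
	defer f.Close()

	st, err := f.Stat()
	if err != nil {
		panic(err)
	}

	// Read-only, shared mapping with the populate hint.
	b, err := syscall.Mmap(int(f.Fd()), 0, int(st.Size()),
		syscall.PROT_READ, syscall.MAP_SHARED|MAP_POPULATE)
	if err != nil {
		panic(err)
	}
	defer syscall.Munmap(b)

	fmt.Println("mapped", len(b), "bytes")
}
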
@@ -2302,7 +2298,7 @@ func openFileAndCheckpoint(fileName string) (*os.File, error) {
 	}

 	// write the header, which is just the magic number
-	if _, err := f.Write(u32tob(magicNumber)); err != nil {
+	if _, err := f.Write(u32tob(MagicNumber)); err != nil {
 		f.Close()
 		return nil, err
 	}

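The file header written here is just the 4-byte magic number. A plausible sketch of the u32tob helper used above, assuming big-endian byte order (the helper's body is not part of this diff):

import "encoding/binary"

// u32tob converts a uint32 to its 4-byte big-endian form, e.g. the
// magic-number header above. The byte order is an assumption; u32tob's
// actual implementation is not shown in this commit.
func u32tob(v uint32) []byte {
	b := make([]byte, 4)
	binary.BigEndian.PutUint32(b, v)
	return b
}
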
@@ -14,7 +14,6 @@ import (
 	"math/rand"
 	"reflect"
 	"sort"
-	"strings"

 	// "github.com/davecgh/go-spew/spew"
 	"github.com/influxdb/influxdb/influxql"

@@ -1288,7 +1287,13 @@ func valueCompare(a, b interface{}) int {
 	d1, ok1 := a.(string)
 	d2, ok2 := b.(string)
 	if ok1 && ok2 {
-		return strings.Compare(d1, d2)
+		if d1 == d2 {
+			return 0
+		}
+		if d1 > d2 {
+			return 1
+		}
+		return -1
 	}
 	}
 	panic(fmt.Sprintf("unreachable code; types were %T, %T", a, b))

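strings.Compare was only added in Go 1.5, so spelling the three-way comparison out with plain operators keeps this code building on Go 1.4 (which is why the "strings" import above is also dropped). The same logic as a standalone helper, a sketch equivalent in behavior to strings.Compare:

// compareStrings mirrors strings.Compare using only operators available
// in Go 1.4: -1 if a < b, 0 if a == b, +1 if a > b.
func compareStrings(a, b string) int {
	switch {
	case a == b:
		return 0
	case a > b:
		return 1
	default:
		return -1
	}
}
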
@@ -66,6 +66,11 @@ func (i *InfluxDB) Connect() error {
 	for _, u := range urls {
 		switch {
 		case strings.HasPrefix(u, "udp"):
+			parsed_url, err := url.Parse(u)
+			if err != nil {
+				return err
+			}
+
 			if i.UDPPayload == 0 {
 				i.UDPPayload = client.UDPPayloadSize
 			}

@@ -80,7 +85,7 @@ func (i *InfluxDB) Connect() error {
 		default:
 			// If URL doesn't start with "udp", assume HTTP client
 			c, err := client.NewHTTPClient(client.HTTPConfig{
-				Addr:      parsed_url.String(),
+				Addr:      u,
 				Username:  i.Username,
 				Password:  i.Password,
 				UserAgent: i.UserAgent,
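Taken together, these two hunks parse the URL only in the udp branch, where the host is actually needed, and hand the raw string straight to the HTTP client. A minimal sketch of the same dispatch pattern; newUDPConn and newHTTPConn are hypothetical stand-ins for the real client constructors.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// newUDPConn and newHTTPConn are hypothetical stand-ins; only the
// scheme-based dispatch is the point here.
func newUDPConn(host string) string  { return "udp connection to " + host }
func newHTTPConn(addr string) string { return "http connection to " + addr }

// connect dispatches on the URL prefix the way the plugin does: udp URLs
// are parsed to reach their host; anything else is treated as HTTP and
// the raw string is passed through unchanged.
func connect(rawURL string) (string, error) {
	switch {
	case strings.HasPrefix(rawURL, "udp"):
		parsed, err := url.Parse(rawURL)
		if err != nil {
			return "", err
		}
		return newUDPConn(parsed.Host), nil
	default:
		return newHTTPConn(rawURL), nil
	}
}

func main() {
	for _, u := range []string{"udp://localhost:8089", "http://localhost:8086"} {
		c, err := connect(u)
		if err != nil {
			panic(err)
		}
		fmt.Println(c)
	}
}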