godep update: influxdb

commit b15928c95e (parent 97d4f9e0ff)
@@ -102,8 +102,8 @@
 		},
 		{
 			"ImportPath": "github.com/influxdb/influxdb",
-			"Comment": "v0.9.4-rc1-478-g73a630d",
-			"Rev": "73a630dfa64003c27782a1b0a6b817e839c5c3ea"
+			"Comment": "v0.9.4-rc1-652-gd9f0413",
+			"Rev": "d9f04132ef567bb9671690e4db226ff3dab9feb5"
 		},
 		{
 			"ImportPath": "github.com/lib/pq",
@@ -34,6 +34,10 @@ influxdb
 **/influxdb
 !**/influxdb/
 
+influx_inspect
+**/influx_inspect
+!**/influx_inspect/
+
 /benchmark-tool
 /main
 /benchmark-storage
@@ -50,7 +54,6 @@ packages/
 autom4te.cache/
 config.log
 config.status
-Makefile
 
 # log file
 influxdb.log
@@ -1,6 +1,7 @@
 ## v0.9.5 [unreleased]
 
 ### Features
+- [#4098](https://github.com/influxdb/influxdb/issues/4098): Enable `golint` on the code base - uuid subpackage
 - [#4141](https://github.com/influxdb/influxdb/pull/4141): Control whether each query should be logged
 - [#4065](https://github.com/influxdb/influxdb/pull/4065): Added precision support in cmd client. Thanks @sbouchex
 - [#4140](https://github.com/influxdb/influxdb/pull/4140): Make storage engine configurable
@@ -13,8 +14,16 @@
 - [#4265](https://github.com/influxdb/influxdb/pull/4265): Add statistics for Hinted-Handoff
 - [#4284](https://github.com/influxdb/influxdb/pull/4284): Add exponential backoff for hinted-handoff failures
 - [#4310](https://github.com/influxdb/influxdb/pull/4310): Support dropping non-Raft nodes. Work mostly by @corylanou
+- [#4348](https://github.com/influxdb/influxdb/pull/4348): Public ApplyTemplate function for graphite parser.
+- [#4178](https://github.com/influxdb/influxdb/pull/4178): Support fields in graphite parser. Thanks @roobert!
+- [#4291](https://github.com/influxdb/influxdb/pull/4291): Added ALTER DATABASE RENAME. Thanks @linearb
+- [#4409](https://github.com/influxdb/influxdb/pull/4409): wire up INTO queries.
+- [#4379](https://github.com/influxdb/influxdb/pull/4379): Auto-create database for UDP input.
+- [#4375](https://github.com/influxdb/influxdb/pull/4375): Add Subscriptions so data can be 'forked' out of InfluxDB to another third party.
+- [#4459](https://github.com/influxdb/influxdb/pull/4459): Register with Enterprise service if token available.
 
 ### Bugfixes
+- [#4389](https://github.com/influxdb/influxdb/pull/4389): Don't add a new segment file on each hinted-handoff purge cycle.
 - [#4166](https://github.com/influxdb/influxdb/pull/4166): Fix parser error on invalid SHOW
 - [#3457](https://github.com/influxdb/influxdb/issues/3457): [0.9.3] cannot select field names with prefix + "." that match the measurement name
 - [#4225](https://github.com/influxdb/influxdb/pull/4225): Always display diags in name-sorted order
@@ -36,13 +45,32 @@
 - [#4263](https://github.com/influxdb/influxdb/issues/4263): derivative does not work when data is missing
 - [#4293](https://github.com/influxdb/influxdb/pull/4293): Ensure shell is invoked when touching PID file. Thanks @christopherjdickson
 - [#4296](https://github.com/influxdb/influxdb/pull/4296): Reject line protocol ending with '-'. Fixes [#4272](https://github.com/influxdb/influxdb/issues/4272)
-- [#4333](https://github.com/influxdb/influxdb/pull/4333): Retry monitor storage creation and only on Leader.
+- [#4333](https://github.com/influxdb/influxdb/pull/4333): Retry monitor storage creation and storage only on Leader.
 - [#4276](https://github.com/influxdb/influxdb/issues/4276): Walk DropSeriesStatement & check for empty sources
+- [#4465](https://github.com/influxdb/influxdb/pull/4465): Actually display a message if the CLI can't connect to the database.
 - [#4342](https://github.com/influxdb/influxdb/pull/4342): Fix mixing aggregates and math with non-aggregates. Thanks @kostya-sh.
 - [#4349](https://github.com/influxdb/influxdb/issues/4349): If HH can't unmarshal a block, skip that block.
 - [#4354](https://github.com/influxdb/influxdb/pull/4353): Fully lock node queues during hinted handoff. Fixes one cause of missing data on clusters.
 - [#4357](https://github.com/influxdb/influxdb/issues/4357): Fix similar float values encoding overflow Thanks @dgryski!
 - [#4344](https://github.com/influxdb/influxdb/issues/4344): Make client.Write default to client.precision if none is given.
+- [#3429](https://github.com/influxdb/influxdb/issues/3429): Incorrect parsing of regex containing '/'
+- [#4374](https://github.com/influxdb/influxdb/issues/4374): Add tsm1 quickcheck tests
+- [#4377](https://github.com/influxdb/influxdb/pull/4377): Hinted handoff should not process dropped nodes
+- [#4365](https://github.com/influxdb/influxdb/issues/4365): Prevent panic in DecodeSameTypeBlock
+- [#4280](https://github.com/influxdb/influxdb/issues/4280): Only drop points matching WHERE clause
+- [#4443](https://github.com/influxdb/influxdb/pull/4443): Fix race condition while listing store's shards. Fixes [#4442](https://github.com/influxdb/influxdb/issues/4442)
+- [#4410](https://github.com/influxdb/influxdb/pull/4410): Fix infinite recursion in statement string(). Thanks @kostya-sh
+- [#4360](https://github.com/influxdb/influxdb/issues/4360): Aggregate Selectors overwrite values during post-processing
+- [#4421](https://github.com/influxdb/influxdb/issues/4421): Fix line protocol accepting tags with no values
+- [#4434](https://github.com/influxdb/influxdb/pull/4434): Allow 'E' for scientific values. Fixes [#4433](https://github.com/influxdb/influxdb/issues/4433)
+- [#4431](https://github.com/influxdb/influxdb/issues/4431): Add tsm1 WAL QuickCheck
+- [#4438](https://github.com/influxdb/influxdb/pull/4438): openTSDB service shutdown fixes
+- [#4447](https://github.com/influxdb/influxdb/pull/4447): Fixes to logrotate file. Thanks @linsomniac.
+- [#3820](https://github.com/influxdb/influxdb/issues/3820): Fix js error in admin UI.
+- [#4460](https://github.com/influxdb/influxdb/issues/4460): tsm1 meta lint
+- [#4415](https://github.com/influxdb/influxdb/issues/4415): Selector (like max, min, first, etc) return a string instead of timestamp
+- [#4472](https://github.com/influxdb/influxdb/issues/4472): Fix 'too many points in GROUP BY interval' error
+- [#4475](https://github.com/influxdb/influxdb/issues/4475): Fix SHOW TAG VALUES error message.
 
 ## v0.9.4 [2015-09-14]
 
@@ -122,10 +122,7 @@ Retaining the directory structure `$GOPATH/src/github.com/influxdb` is necessary
 Pre-commit checks
 -------------
 
-We have a pre commit hook to make sure code is formatted properly
-and vetted before you commit any changes. We strongly recommend using the pre
-commit hook to guard against accidentally committing unformatted
-code. To use the pre-commit hook, run the following:
+We have a pre-commit hook to make sure code is formatted properly and vetted before you commit any changes. We strongly recommend using the pre-commit hook to guard against accidentally committing unformatted code. To use the pre-commit hook, run the following:
 
     cd $GOPATH/src/github.com/influxdb/influxdb
     cp .hooks/pre-commit .git/hooks/
@@ -229,11 +226,13 @@ When troubleshooting problems with CPU or memory the Go toolchain can be helpful
 # start influx with profiling
 ./influxd -cpuprofile influxd.prof
 # run queries, writes, whatever you're testing
-# open up pprof
-go tool pprof influxd influxd.prof
+# Quit out of influxd and influxd.prof will then be written.
+# open up pprof to examine the profiling data.
+go tool pprof ./influxd influxd.prof
 # once inside run "web", opens up browser with the CPU graph
 # can also run "web <function name>" to zoom in. Or "list <function name>" to see specific lines
 ```
+Note that when you pass the binary to `go tool pprof` *you must specify the path to the binary*.
 
 Continuous Integration testing
 -----
@@ -0,0 +1,38 @@
+PACKAGES=$(shell find . -name '*.go' -print0 | xargs -0 -n1 dirname | sort --unique)
+
+default:
+
+metalint: deadcode cyclo aligncheck defercheck structcheck lint errcheck
+
+deadcode:
+	@deadcode $(PACKAGES) 2>&1
+
+cyclo:
+	@gocyclo -over 10 $(PACKAGES)
+
+aligncheck:
+	@aligncheck $(PACKAGES)
+
+defercheck:
+	@defercheck $(PACKAGES)
+
+
+structcheck:
+	@structcheck $(PACKAGES)
+
+lint:
+	@for pkg in $(PACKAGES); do golint $$pkg; done
+
+errcheck:
+	@for pkg in $(PACKAGES); do \
+		errcheck -ignorepkg=bytes,fmt -ignore=":(Rollback|Close)" $$pkg \
+	done
+
+tools:
+	go get github.com/remyoudompheng/go-misc/deadcode
+	go get github.com/alecthomas/gocyclo
+	go get github.com/opennota/check/...
+	go get github.com/golang/lint/golint
+	go get github.com/kisielk/errcheck
+
+.PHONY: default,metalint,deadcode,cyclo,aligncheck,defercheck,structcheck,lint,errcheck,tools
@@ -1,4 +1,4 @@
-The top level name is called a measurement. These names can contain any characters. Then there are field names, field values, tag keys and tag values, which can also contain any characters. However, if the measurement, field, or tag contains any character other than [A-Z,a-z,0-9,_], or if it starts with a digit, it must be double-quoted. Therefore anywhere a measurement name, field name, field value, tag name, or tag value appears it should be wrapped in double quotes.
+The top level name is called a measurement. These names can contain any characters. Then there are field names, field values, tag keys and tag values, which can also contain any characters. However, if the measurement, field, or tag contains any character other than [A-Z,a-z,0-9,_], or if it starts with a digit, it must be double-quoted. Therefore anywhere a measurement name, field key, or tag key appears it should be wrapped in double quotes.
 
 # Databases & retention policies
 
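To make the quoting rule above concrete, here is a small sketch (the identifiers are hypothetical, not part of this commit): any identifier containing a character outside [A-Z,a-z,0-9,_], or starting with a digit, must be double-quoted wherever it appears in a query.

```go
package main

import "fmt"

func main() {
	// "cpu-total" contains '-' and "1m_load" starts with a digit,
	// so both must be double-quoted in InfluxQL.
	measurement := "cpu-total"
	fieldKey := "1m_load"

	// %q wraps the identifier in double quotes.
	q := fmt.Sprintf("SELECT %q FROM %q", fieldKey, measurement)
	fmt.Println(q) // SELECT "1m_load" FROM "cpu-total"
}
```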
@@ -1,9 +1,13 @@
 # InfluxDB Client
 
-[![GoDoc](https://godoc.org/github.com/influxdb/influxdb?status.svg)](http://godoc.org/github.com/influxdb/influxdb/client)
+[![GoDoc](https://godoc.org/github.com/influxdb/influxdb?status.svg)](http://godoc.org/github.com/influxdb/influxdb/client/v2)
 
 ## Description
 
+**NOTE:** The Go client library now has a "v2" version, with the old version
+being deprecated. The new version can be imported at
+`import "github.com/influxdb/influxdb/client/v2"`. It is not backwards-compatible.
+
 A Go client library written and maintained by the **InfluxDB** team.
 This package provides convenience functions to read and write time series data.
 It uses the HTTP protocol to communicate with your **InfluxDB** cluster.
@@ -14,8 +18,8 @@ It uses the HTTP protocol to communicate with your **InfluxDB** cluster.
 ### Connecting To Your Database
 
 Connecting to an **InfluxDB** database is straightforward. You will need a host
-name, a port and the cluster user credentials if applicable. The default port is 8086.
-You can customize these settings to your specific installation via the
+name, a port and the cluster user credentials if applicable. The default port is
+8086. You can customize these settings to your specific installation via the
 **InfluxDB** configuration file.
 
 Thought not necessary for experimentation, you may want to create a new user
@@ -44,43 +48,49 @@ the configuration below.
 ```go
 package main
 
-import "github.com/influxdb/influxdb/client"
+import
 import (
 	"net/url"
 	"fmt"
 	"log"
 	"os"
+
+	"github.com/influxdb/influxdb/client/v2"
 )
 
 const (
-	MyHost        = "localhost"
-	MyPort        = 8086
 	MyDB          = "square_holes"
-	MyMeasurement = "shapes"
+	username      = "bubba"
+	password      = "bumblebeetuna"
 )
 
 func main() {
-	u, err := url.Parse(fmt.Sprintf("http://%s:%d", MyHost, MyPort))
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	conf := client.Config{
-		URL:      *u,
-		Username: os.Getenv("INFLUX_USER"),
-		Password: os.Getenv("INFLUX_PWD"),
-	}
-
-	con, err := client.NewClient(conf)
-	if err != nil {
-		log.Fatal(err)
+	// Make client
+	u, _ := url.Parse("http://localhost:8086")
+	c := client.NewClient(client.Config{
+		URL:      u,
+		Username: username,
+		Password: password,
+	})
+
+	// Create a new point batch
+	bp := client.NewBatchPoints(client.BatchPointsConfig{
+		Database:  MyDB,
+		Precision: "s",
+	})
+
+	// Create a point and add to batch
+	tags := map[string]string{"cpu": "cpu-total"}
+	fields := map[string]interface{}{
+		"idle":   10.1,
+		"system": 53.3,
+		"user":   46.6,
 	}
+	pt := client.NewPoint("cpu_usage", tags, fields, time.Now())
+	bp.AddPoint(pt)
 
-	dur, ver, err := con.Ping()
-	if err != nil {
-		log.Fatal(err)
-	}
-	log.Printf("Happy as a Hippo! %v, %s", dur, ver)
+	// Write the batch
+	c.Write(bp)
 }
 
 ```
@@ -88,49 +98,50 @@ func main() {
 ### Inserting Data
 
 Time series data aka *points* are written to the database using batch inserts.
-The mechanism is to create one or more points and then create a batch aka *batch points*
-and write these to a given database and series. A series is a combination of a
-measurement (time/values) and a set of tags.
+The mechanism is to create one or more points and then create a batch aka
+*batch points* and write these to a given database and series. A series is a
+combination of a measurement (time/values) and a set of tags.
 
 In this sample we will create a batch of a 1,000 points. Each point has a time and
 a single value as well as 2 tags indicating a shape and color. We write these points
 to a database called _square_holes_ using a measurement named _shapes_.
 
 NOTE: You can specify a RetentionPolicy as part of the batch points. If not
-provided InfluxDB will use the database _default_ retention policy. By default, the _default_
-retention policy never deletes any data it contains.
+provided InfluxDB will use the database _default_ retention policy.
 
 ```go
-func writePoints(con *client.Client) {
-	var (
-		shapes     = []string{"circle", "rectangle", "square", "triangle"}
-		colors     = []string{"red", "blue", "green"}
-		sampleSize = 1000
-		pts        = make([]client.Point, sampleSize)
-	)
+func writePoints(clnt client.Client) {
+	sampleSize := 1000
 
 	rand.Seed(42)
 
+	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
+		Database:  "systemstats",
+		Precision: "us",
+	})
+
 	for i := 0; i < sampleSize; i++ {
-		pts[i] = client.Point{
-			Measurement: "shapes",
-			Tags: map[string]string{
-				"color": strconv.Itoa(rand.Intn(len(colors))),
-				"shape": strconv.Itoa(rand.Intn(len(shapes))),
-			},
-			Fields: map[string]interface{}{
-				"value": rand.Intn(sampleSize),
-			},
-			Time:      time.Now(),
-			Precision: "s",
-		}
-	}
+		regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"}
+		tags := map[string]string{
+			"cpu":    "cpu-total",
+			"host":   fmt.Sprintf("host%d", rand.Intn(1000)),
+			"region": regions[rand.Intn(len(regions))],
+		}
 
-	bps := client.BatchPoints{
-		Points:          pts,
-		Database:        MyDB,
-		RetentionPolicy: "default",
-	}
-	_, err := con.Write(bps)
+		idle := rand.Float64() * 100.0
+		fields := map[string]interface{}{
+			"idle": idle,
+			"busy": 100.0 - idle,
+		}
+
+		bp.AddPoint(client.NewPoint(
+			"cpu_usage",
+			tags,
+			fields,
+			time.Now(),
+		))
+	}
+
+	err := clnt.Write(bp)
 	if err != nil {
 		log.Fatal(err)
 	}
@@ -146,46 +157,47 @@ as follows:
 
 ```go
 // queryDB convenience function to query the database
-func queryDB(con *client.Client, cmd string) (res []client.Result, err error) {
+func queryDB(clnt client.Client, cmd string) (res []client.Result, err error) {
 	q := client.Query{
 		Command:  cmd,
 		Database: MyDB,
 	}
-	if response, err := con.Query(q); err == nil {
+	if response, err := clnt.Query(q); err == nil {
 		if response.Error() != nil {
 			return res, response.Error()
 		}
 		res = response.Results
 	}
-	return
+	return response, nil
 }
 ```
 
 #### Creating a Database
 
 ```go
-_, err := queryDB(con, fmt.Sprintf("create database %s", MyDB))
+_, err := queryDB(clnt, fmt.Sprintf("CREATE DATABASE %s", MyDB))
 if err != nil {
 	log.Fatal(err)
 }
 ```
 
 #### Count Records
 
 ```go
-q := fmt.Sprintf("select count(%s) from %s", "value", MyMeasurement)
-res, err := queryDB(con, q)
+q := fmt.Sprintf("SELECT count(%s) FROM %s", "value", MyMeasurement)
+res, err := queryDB(clnt, q)
 if err != nil {
 	log.Fatal(err)
 }
 count := res[0].Series[0].Values[0][1]
-log.Printf("Found a total of `%v records", count)
+log.Printf("Found a total of %v records\n", count)
 
 ```
 
 #### Find the last 10 _shapes_ records
 
 ```go
-q := fmt.Sprintf("select * from %s limit %d", MyMeasurement, 20)
-res, err = queryDB(con, q)
+q := fmt.Sprintf("SELECT * FROM %s LIMIT %d", MyMeasurement, 20)
+res, err = queryDB(clnt, q)
 if err != nil {
 	log.Fatal(err)
 }
@@ -99,9 +99,16 @@ type Client struct {
 }
 
 const (
+	// ConsistencyOne requires at least one data node acknowledged a write.
 	ConsistencyOne = "one"
+
+	// ConsistencyAll requires all data nodes to acknowledge a write.
 	ConsistencyAll = "all"
+
+	// ConsistencyQuorum requires a quorum of data nodes to acknowledge a write.
 	ConsistencyQuorum = "quorum"
+
+	// ConsistencyAny allows for hinted hand off, potentially no write happened yet.
 	ConsistencyAny = "any"
 )
 
@@ -464,6 +471,8 @@ func (p *Point) MarshalJSON() ([]byte, error) {
 	return json.Marshal(&point)
 }
 
+// MarshalString renders string representation of a Point with specified
+// precision. The default precision is nanoseconds.
 func (p *Point) MarshalString() string {
 	pt := models.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time)
 	if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" {
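The consistency constants documented above are plain strings that the server accepts as a write's `consistency` level. A minimal sketch of selecting one for a batch with the v2 client added later in this commit (the database name is illustrative, not from this commit):

```go
package main

import (
	"log"

	"github.com/influxdb/influxdb/client/v2"
)

func main() {
	// The constants are plain strings, so the matching value for the
	// v2 BatchPointsConfig is simply "quorum". "mydb" is illustrative.
	bp, err := client.NewBatchPoints(client.BatchPointsConfig{
		Database:         "mydb",
		Precision:        "s",
		WriteConsistency: "quorum",
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = bp // points would be added and written here
}
```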
Godeps/_workspace/src/github.com/influxdb/influxdb/client/v2/client.go (generated, vendored, new file, 353 lines)
@@ -0,0 +1,353 @@
+package client
+
+import (
+	"bytes"
+	"crypto/tls"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/influxdb/influxdb/models"
+)
+
+type Config struct {
+	// URL of the InfluxDB database
+	URL *url.URL
+
+	// Username is the influxdb username, optional
+	Username string
+
+	// Password is the influxdb password, optional
+	Password string
+
+	// UserAgent is the http User Agent, defaults to "InfluxDBClient"
+	UserAgent string
+
+	// Timeout for influxdb writes, defaults to no timeout
+	Timeout time.Duration
+
+	// InsecureSkipVerify gets passed to the http client, if true, it will
+	// skip https certificate verification. Defaults to false
+	InsecureSkipVerify bool
+}
+
+type BatchPointsConfig struct {
+	// Precision is the write precision of the points, defaults to "ns"
+	Precision string
+
+	// Database is the database to write points to
+	Database string
+
+	// RetentionPolicy is the retention policy of the points
+	RetentionPolicy string
+
+	// Write consistency is the number of servers required to confirm write
+	WriteConsistency string
+}
+
+type Client interface {
+	// Write takes a BatchPoints object and writes all Points to InfluxDB.
+	Write(bp BatchPoints) error
+
+	// Query makes an InfluxDB Query on the database
+	Query(q Query) (*Response, error)
+}
+
+// NewClient creates a client interface from the given config.
+func NewClient(conf Config) Client {
+	if conf.UserAgent == "" {
+		conf.UserAgent = "InfluxDBClient"
+	}
+	tr := &http.Transport{
+		TLSClientConfig: &tls.Config{
+			InsecureSkipVerify: conf.InsecureSkipVerify,
+		},
+	}
+	return &client{
+		url:       conf.URL,
+		username:  conf.Username,
+		password:  conf.Password,
+		useragent: conf.UserAgent,
+		httpClient: &http.Client{
+			Timeout:   conf.Timeout,
+			Transport: tr,
+		},
+	}
+}
+
+type client struct {
+	url        *url.URL
+	username   string
+	password   string
+	useragent  string
+	httpClient *http.Client
+}
+
+// BatchPoints is an interface into a batched grouping of points to write into
+// InfluxDB together. BatchPoints is NOT thread-safe, you must create a separate
+// batch for each goroutine.
+type BatchPoints interface {
+	// AddPoint adds the given point to the Batch of points
+	AddPoint(p *Point)
+	// Points lists the points in the Batch
+	Points() []*Point
+
+	// Precision returns the currently set precision of this Batch
+	Precision() string
+	// SetPrecision sets the precision of this batch.
+	SetPrecision(s string) error
+
+	// Database returns the currently set database of this Batch
+	Database() string
+	// SetDatabase sets the database of this Batch
+	SetDatabase(s string)
+
+	// WriteConsistency returns the currently set write consistency of this Batch
+	WriteConsistency() string
+	// SetWriteConsistency sets the write consistency of this Batch
+	SetWriteConsistency(s string)
+
+	// RetentionPolicy returns the currently set retention policy of this Batch
+	RetentionPolicy() string
+	// SetRetentionPolicy sets the retention policy of this Batch
+	SetRetentionPolicy(s string)
+}
+
+// NewBatchPoints returns a BatchPoints interface based on the given config.
+func NewBatchPoints(c BatchPointsConfig) (BatchPoints, error) {
+	if c.Precision == "" {
+		c.Precision = "ns"
+	}
+	if _, err := time.ParseDuration("1" + c.Precision); err != nil {
+		return nil, err
+	}
+	bp := &batchpoints{
+		database:         c.Database,
+		precision:        c.Precision,
+		retentionPolicy:  c.RetentionPolicy,
+		writeConsistency: c.WriteConsistency,
+	}
+	return bp, nil
+}
+
+type batchpoints struct {
+	points           []*Point
+	database         string
+	precision        string
+	retentionPolicy  string
+	writeConsistency string
+}
+
+func (bp *batchpoints) AddPoint(p *Point) {
+	bp.points = append(bp.points, p)
+}
+
+func (bp *batchpoints) Points() []*Point {
+	return bp.points
+}
+
+func (bp *batchpoints) Precision() string {
+	return bp.precision
+}
+
+func (bp *batchpoints) Database() string {
+	return bp.database
+}
+
+func (bp *batchpoints) WriteConsistency() string {
+	return bp.writeConsistency
+}
+
+func (bp *batchpoints) RetentionPolicy() string {
+	return bp.retentionPolicy
+}
+
+func (bp *batchpoints) SetPrecision(p string) error {
+	if _, err := time.ParseDuration("1" + p); err != nil {
+		return err
+	}
+	bp.precision = p
+	return nil
+}
+
+func (bp *batchpoints) SetDatabase(db string) {
+	bp.database = db
+}
+
+func (bp *batchpoints) SetWriteConsistency(wc string) {
+	bp.writeConsistency = wc
+}
+
+func (bp *batchpoints) SetRetentionPolicy(rp string) {
+	bp.retentionPolicy = rp
+}
+
+type Point struct {
+	pt models.Point
+}
+
+// NewPoint returns a point with the given timestamp. If a timestamp is not
+// given, then data is sent to the database without a timestamp, in which case
+// the server will assign local time upon reception. NOTE: it is recommended
+// to send data with a timestamp.
+func NewPoint(
+	name string,
+	tags map[string]string,
+	fields map[string]interface{},
+	t ...time.Time,
+) *Point {
+	var T time.Time
+	if len(t) > 0 {
+		T = t[0]
+	}
+	return &Point{
+		pt: models.NewPoint(name, tags, fields, T),
+	}
+}
+
+// String returns a line-protocol string of the Point
+func (p *Point) String() string {
+	return p.pt.String()
+}
+
+// PrecisionString returns a line-protocol string of the Point, at precision
+func (p *Point) PrecisionString(precison string) string {
+	return p.pt.PrecisionString(precison)
+}
+
+func (c *client) Write(bp BatchPoints) error {
+	u := c.url
+	u.Path = "write"
+
+	var b bytes.Buffer
+	for _, p := range bp.Points() {
+		if _, err := b.WriteString(p.pt.PrecisionString(bp.Precision())); err != nil {
+			return err
+		}
+
+		if err := b.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+
+	req, err := http.NewRequest("POST", u.String(), &b)
+	if err != nil {
+		return err
+	}
+	req.Header.Set("Content-Type", "")
+	req.Header.Set("User-Agent", c.useragent)
+	if c.username != "" {
+		req.SetBasicAuth(c.username, c.password)
+	}
+
+	params := req.URL.Query()
+	params.Set("db", bp.Database())
+	params.Set("rp", bp.RetentionPolicy())
+	params.Set("precision", bp.Precision())
+	params.Set("consistency", bp.WriteConsistency())
+	req.URL.RawQuery = params.Encode()
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return err
+	}
+
+	if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
+		var err = fmt.Errorf(string(body))
+		return err
+	}
+
+	return nil
+}
+
+// Query defines a query to send to the server
+type Query struct {
+	Command   string
+	Database  string
+	Precision string
+}
+
+// Response represents a list of statement results.
+type Response struct {
+	Results []Result
+	Err     error
+}
+
+// Error returns the first error from any statement.
+// Returns nil if no errors occurred on any statements.
+func (r *Response) Error() error {
+	if r.Err != nil {
+		return r.Err
+	}
+	for _, result := range r.Results {
+		if result.Err != nil {
+			return result.Err
+		}
+	}
+	return nil
+}
+
+// Result represents a resultset returned from a single statement.
+type Result struct {
+	Series []models.Row
+	Err    error
+}
+
+// Query sends a command to the server and returns the Response
+func (c *client) Query(q Query) (*Response, error) {
+	u := c.url
+
+	u.Path = "query"
+	values := u.Query()
+	values.Set("q", q.Command)
+	values.Set("db", q.Database)
+	if q.Precision != "" {
+		values.Set("epoch", q.Precision)
+	}
+	u.RawQuery = values.Encode()
+
+	req, err := http.NewRequest("GET", u.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("User-Agent", c.useragent)
+	if c.username != "" {
+		req.SetBasicAuth(c.username, c.password)
+	}
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var response Response
+	dec := json.NewDecoder(resp.Body)
+	dec.UseNumber()
+	decErr := dec.Decode(&response)
+
+	// ignore this error if we got an invalid status code
+	if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK {
+		decErr = nil
+	}
+	// If we got a valid decode error, send that back
+	if decErr != nil {
+		return nil, decErr
+	}
+	// If we don't have an error in our json response, and didn't get statusOK
+	// then send back an error
+	if resp.StatusCode != http.StatusOK && response.Error() == nil {
+		return &response, fmt.Errorf("received status code %d from server",
+			resp.StatusCode)
+	}
+	return &response, nil
+}
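Two details of the client above are worth calling out: `NewBatchPoints` validates the precision by parsing `"1" + precision` as a duration, and `NewPoint`'s variadic timestamp lets a point be created without one, leaving the server to assign local time on reception. A short sketch against this API (measurement and field names are illustrative):

```go
package main

import (
	"log"

	"github.com/influxdb/influxdb/client/v2"
)

func main() {
	// "foobar" is rejected because time.ParseDuration("1foobar") fails.
	if _, err := client.NewBatchPoints(client.BatchPointsConfig{Precision: "foobar"}); err != nil {
		log.Println("invalid precision:", err)
	}

	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{Precision: "s"})

	// Omitting the timestamp sends the point without one; the server
	// then assigns its own local time.
	bp.AddPoint(client.NewPoint(
		"cpu_usage",
		map[string]string{"cpu": "cpu-total"},
		map[string]interface{}{"idle": 10.1},
	))
}
```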
Godeps/_workspace/src/github.com/influxdb/influxdb/client/v2/client_test.go (generated, vendored, new file, 242 lines)
@@ -0,0 +1,242 @@
+package client
+
+import (
+	"encoding/json"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"strings"
+	"testing"
+	"time"
+)
+
+func TestClient_Query(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		var data Response
+		w.WriteHeader(http.StatusOK)
+		_ = json.NewEncoder(w).Encode(data)
+	}))
+	defer ts.Close()
+
+	u, _ := url.Parse(ts.URL)
+	config := Config{URL: u}
+	c := NewClient(config)
+
+	query := Query{}
+	_, err := c.Query(query)
+	if err != nil {
+		t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
+	}
+}
+
+func TestClient_BasicAuth(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		u, p, ok := r.BasicAuth()
+
+		if !ok {
+			t.Errorf("basic auth error")
+		}
+		if u != "username" {
+			t.Errorf("unexpected username, expected %q, actual %q", "username", u)
+		}
+		if p != "password" {
+			t.Errorf("unexpected password, expected %q, actual %q", "password", p)
+		}
+		var data Response
+		w.WriteHeader(http.StatusOK)
+		_ = json.NewEncoder(w).Encode(data)
+	}))
+	defer ts.Close()
+
+	u, _ := url.Parse(ts.URL)
+	u.User = url.UserPassword("username", "password")
+	config := Config{URL: u, Username: "username", Password: "password"}
+	c := NewClient(config)
+
+	query := Query{}
+	_, err := c.Query(query)
+	if err != nil {
+		t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
+	}
+}
+
+func TestClient_Write(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		var data Response
+		w.WriteHeader(http.StatusNoContent)
+		_ = json.NewEncoder(w).Encode(data)
+	}))
+	defer ts.Close()
+
+	u, _ := url.Parse(ts.URL)
+	config := Config{URL: u}
+	c := NewClient(config)
+
+	bp, err := NewBatchPoints(BatchPointsConfig{})
+	if err != nil {
+		t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
+	}
+	err = c.Write(bp)
+	if err != nil {
+		t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
+	}
+}
+
+func TestClient_UserAgent(t *testing.T) {
+	receivedUserAgent := ""
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		receivedUserAgent = r.UserAgent()
+
+		var data Response
+		w.WriteHeader(http.StatusOK)
+		_ = json.NewEncoder(w).Encode(data)
+	}))
+	defer ts.Close()
+
+	_, err := http.Get(ts.URL)
+	if err != nil {
+		t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
+	}
+
+	tests := []struct {
+		name      string
+		userAgent string
+		expected  string
+	}{
+		{
+			name:      "Empty user agent",
+			userAgent: "",
+			expected:  "InfluxDBClient",
+		},
+		{
+			name:      "Custom user agent",
+			userAgent: "Test Influx Client",
+			expected:  "Test Influx Client",
+		},
+	}
+
+	for _, test := range tests {
+		u, _ := url.Parse(ts.URL)
+		config := Config{URL: u, UserAgent: test.userAgent}
+		c := NewClient(config)
+
+		receivedUserAgent = ""
+		query := Query{}
+		_, err = c.Query(query)
+		if err != nil {
+			t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
+		}
+		if !strings.HasPrefix(receivedUserAgent, test.expected) {
+			t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent)
+		}
+
+		receivedUserAgent = ""
+		bp, _ := NewBatchPoints(BatchPointsConfig{})
+		err = c.Write(bp)
+		if err != nil {
+			t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
+		}
+		if !strings.HasPrefix(receivedUserAgent, test.expected) {
+			t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent)
+		}
+
+		receivedUserAgent = ""
+		_, err = c.Query(query)
+		if err != nil {
+			t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
+		}
+		if receivedUserAgent != test.expected {
+			t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent)
+		}
+	}
+}
+
+func TestClient_PointString(t *testing.T) {
+	const shortForm = "2006-Jan-02"
+	time1, _ := time.Parse(shortForm, "2013-Feb-03")
+	tags := map[string]string{"cpu": "cpu-total"}
+	fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0}
+	p := NewPoint("cpu_usage", tags, fields, time1)
+
+	s := "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39 1359849600000000000"
+	if p.String() != s {
+		t.Errorf("Point String Error, got %s, expected %s", p.String(), s)
+	}
+
+	s = "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39 1359849600000"
+	if p.PrecisionString("ms") != s {
+		t.Errorf("Point String Error, got %s, expected %s",
+			p.PrecisionString("ms"), s)
+	}
+}
+
+func TestClient_PointWithoutTimeString(t *testing.T) {
+	tags := map[string]string{"cpu": "cpu-total"}
+	fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0}
+	p := NewPoint("cpu_usage", tags, fields)
+
+	s := "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39"
+	if p.String() != s {
+		t.Errorf("Point String Error, got %s, expected %s", p.String(), s)
+	}
+
+	if p.PrecisionString("ms") != s {
+		t.Errorf("Point String Error, got %s, expected %s",
+			p.PrecisionString("ms"), s)
+	}
+}
+
+func TestBatchPoints_PrecisionError(t *testing.T) {
+	_, err := NewBatchPoints(BatchPointsConfig{Precision: "foobar"})
+	if err == nil {
+		t.Errorf("Precision: foobar should have errored")
+	}
+
+	bp, _ := NewBatchPoints(BatchPointsConfig{Precision: "ns"})
+	err = bp.SetPrecision("foobar")
+	if err == nil {
+		t.Errorf("Precision: foobar should have errored")
+	}
+}
+
+func TestBatchPoints_SettersGetters(t *testing.T) {
+	bp, _ := NewBatchPoints(BatchPointsConfig{
+		Precision:        "ns",
+		Database:         "db",
+		RetentionPolicy:  "rp",
+		WriteConsistency: "wc",
+	})
+	if bp.Precision() != "ns" {
+		t.Errorf("Expected: %s, got %s", bp.Precision(), "ns")
+	}
+	if bp.Database() != "db" {
+		t.Errorf("Expected: %s, got %s", bp.Database(), "db")
+	}
+	if bp.RetentionPolicy() != "rp" {
+		t.Errorf("Expected: %s, got %s", bp.RetentionPolicy(), "rp")
+	}
+	if bp.WriteConsistency() != "wc" {
+		t.Errorf("Expected: %s, got %s", bp.WriteConsistency(), "wc")
+	}
+
+	bp.SetDatabase("db2")
+	bp.SetRetentionPolicy("rp2")
+	bp.SetWriteConsistency("wc2")
+	err := bp.SetPrecision("s")
+	if err != nil {
+		t.Errorf("Did not expect error: %s", err.Error())
+	}
+
+	if bp.Precision() != "s" {
+		t.Errorf("Expected: %s, got %s", bp.Precision(), "s")
+	}
+	if bp.Database() != "db2" {
+		t.Errorf("Expected: %s, got %s", bp.Database(), "db2")
+	}
+	if bp.RetentionPolicy() != "rp2" {
+		t.Errorf("Expected: %s, got %s", bp.RetentionPolicy(), "rp2")
+	}
+	if bp.WriteConsistency() != "wc2" {
+		t.Errorf("Expected: %s, got %s", bp.WriteConsistency(), "wc2")
+	}
+}
Godeps/_workspace/src/github.com/influxdb/influxdb/client/v2/example/example.go (generated, vendored, new file, 129 lines)
@@ -0,0 +1,129 @@
+package client_example
+
+import (
+	"fmt"
+	"log"
+	"math/rand"
+	"net/url"
+	"os"
+	"time"
+
+	"github.com/influxdb/influxdb/client/v2"
+)
+
+func ExampleNewClient() client.Client {
+	u, _ := url.Parse("http://localhost:8086")
+
+	// NOTE: this assumes you've setup a user and have setup shell env variables,
+	// namely INFLUX_USER/INFLUX_PWD. If not just ommit Username/Password below.
+	client := client.NewClient(client.Config{
+		URL:      u,
+		Username: os.Getenv("INFLUX_USER"),
+		Password: os.Getenv("INFLUX_PWD"),
+	})
+	return client
+}
+
+func ExampleWrite() {
+	// Make client
+	u, _ := url.Parse("http://localhost:8086")
+	c := client.NewClient(client.Config{
+		URL: u,
+	})
+
+	// Create a new point batch
+	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
+		Database:  "BumbleBeeTuna",
+		Precision: "s",
+	})
+
+	// Create a point and add to batch
+	tags := map[string]string{"cpu": "cpu-total"}
+	fields := map[string]interface{}{
+		"idle":   10.1,
+		"system": 53.3,
+		"user":   46.6,
+	}
+	pt := client.NewPoint("cpu_usage", tags, fields, time.Now())
+	bp.AddPoint(pt)
+
+	// Write the batch
+	c.Write(bp)
+}
+
+// Write 1000 points
+func ExampleWrite1000() {
+	sampleSize := 1000
+
+	// Make client
+	u, _ := url.Parse("http://localhost:8086")
+	clnt := client.NewClient(client.Config{
+		URL: u,
+	})
+
+	rand.Seed(42)
+
+	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
+		Database:  "systemstats",
+		Precision: "us",
+	})
+
+	for i := 0; i < sampleSize; i++ {
+		regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"}
+		tags := map[string]string{
+			"cpu":    "cpu-total",
+			"host":   fmt.Sprintf("host%d", rand.Intn(1000)),
+			"region": regions[rand.Intn(len(regions))],
+		}
+
+		idle := rand.Float64() * 100.0
+		fields := map[string]interface{}{
+			"idle": idle,
+			"busy": 100.0 - idle,
+		}
+
+		bp.AddPoint(client.NewPoint(
+			"cpu_usage",
+			tags,
+			fields,
+			time.Now(),
+		))
+	}
+
+	err := clnt.Write(bp)
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+
+func ExampleQuery() {
+	// Make client
+	u, _ := url.Parse("http://localhost:8086")
+	c := client.NewClient(client.Config{
+		URL: u,
+	})
+
+	q := client.Query{
+		Command:   "SELECT count(value) FROM shapes",
+		Database:  "square_holes",
+		Precision: "ns",
+	}
+	if response, err := c.Query(q); err == nil && response.Error() == nil {
+		log.Println(response.Results)
+	}
+}
+
+func ExampleCreateDatabase() {
+	// Make client
+	u, _ := url.Parse("http://localhost:8086")
+	c := client.NewClient(client.Config{
+		URL: u,
+	})
+
+	q := client.Query{
+		Command: "CREATE DATABASE telegraf",
+	}
+	if response, err := c.Query(q); err == nil && response.Error() == nil {
+		log.Println(response.Results)
+	}
+}
@@ -31,6 +31,8 @@ const (
 	statWriteTimeout    = "write_timeout"
 	statWriteErr        = "write_error"
 	statWritePointReqHH = "point_req_hh"
+	statSubWriteOK      = "sub_write_ok"
+	statSubWriteDrop    = "sub_write_drop"
 )
 
 const (
@@ -107,6 +109,10 @@ type PointsWriter struct {
 		WriteShard(shardID, ownerID uint64, points []models.Point) error
 	}
 
+	Subscriber interface {
+		Points() chan<- *WritePointsRequest
+	}
+
 	statMap *expvar.Map
 }
 
@@ -204,6 +210,18 @@ func (w *PointsWriter) MapShards(wp *WritePointsRequest) (*ShardMapping, error)
 	return mapping, nil
 }
 
+// WritePointsInto is a copy of WritePoints that uses a tsdb structure instead of
+// a cluster structure for information. This is to avoid a circular dependency
+func (w *PointsWriter) WritePointsInto(p *tsdb.IntoWriteRequest) error {
+	req := WritePointsRequest{
+		Database:         p.Database,
+		RetentionPolicy:  p.RetentionPolicy,
+		ConsistencyLevel: ConsistencyLevelAny,
+		Points:           p.Points,
+	}
+	return w.WritePoints(&req)
+}
+
 // WritePoints writes across multiple local and remote data nodes according the consistency level.
 func (w *PointsWriter) WritePoints(p *WritePointsRequest) error {
 	w.statMap.Add(statWriteReq, 1)
@@ -233,6 +251,16 @@ func (w *PointsWriter) WritePoints(p *WritePointsRequest) error {
 		}(shardMappings.Shards[shardID], p.Database, p.RetentionPolicy, points)
 	}
 
+	// Send points to subscriptions if possible.
+	if w.Subscriber != nil {
+		select {
+		case w.Subscriber.Points() <- p:
+			w.statMap.Add(statSubWriteOK, 1)
+		default:
+			w.statMap.Add(statSubWriteDrop, 1)
+		}
+	}
+
 	for range shardMappings.Points {
 		select {
 		case <-w.closing:
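The subscription hook added above uses a non-blocking channel send: when the subscriber cannot keep up, the batch is counted as dropped rather than stalling the write path. The same pattern in isolation, as a runnable sketch (not code from this commit):

```go
package main

import "fmt"

func main() {
	// Buffered channel standing in for Subscriber.Points().
	points := make(chan int, 1)
	okCount, dropCount := 0, 0

	for i := 0; i < 3; i++ {
		select {
		case points <- i:
			okCount++ // receiver has room; send succeeds immediately
		default:
			dropCount++ // channel full: drop instead of blocking the writer
		}
	}
	fmt.Println(okCount, dropCount) // 1 2 (nothing drains the channel here)
}
```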
Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer_test.go (generated, vendored, 26 lines changed)
@@ -308,11 +308,19 @@ func TestPointsWriter_WritePoints(t *testing.T) {
 			return nil, nil
 		}
 		ms.NodeIDFn = func() uint64 { return 1 }
+
+		subPoints := make(chan *cluster.WritePointsRequest, 1)
+		sub := Subscriber{}
+		sub.PointsFn = func() chan<- *cluster.WritePointsRequest {
+			return subPoints
+		}
+
 		c := cluster.NewPointsWriter()
 		c.MetaStore = ms
 		c.ShardWriter = sw
 		c.TSDBStore = store
 		c.HintedHandoff = hh
+		c.Subscriber = sub
 
 		err := c.WritePoints(pr)
 		if err == nil && test.expErr != nil {
@@ -325,6 +333,16 @@ func TestPointsWriter_WritePoints(t *testing.T) {
 		if err != nil && test.expErr != nil && err.Error() != test.expErr.Error() {
 			t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr)
 		}
+		if test.expErr == nil {
+			select {
+			case p := <-subPoints:
+				if p != pr {
+					t.Errorf("PointsWriter.WritePoints(): '%s' error: unexpected WritePointsRequest got %v, exp %v", test.name, p, pr)
+				}
+			default:
+				t.Errorf("PointsWriter.WritePoints(): '%s' error: Subscriber.Points not called", test.name)
+			}
+		}
 	}
 }
 
@@ -406,6 +424,14 @@ func (m MetaStore) ShardOwner(shardID uint64) (string, string, *meta.ShardGroupI
 	return m.ShardOwnerFn(shardID)
 }
 
+type Subscriber struct {
+	PointsFn func() chan<- *cluster.WritePointsRequest
+}
+
+func (s Subscriber) Points() chan<- *cluster.WritePointsRequest {
+	return s.PointsFn()
+}
+
 func NewRetentionPolicy(name string, duration time.Duration, nodeCount int) *meta.RetentionPolicyInfo {
 	shards := []meta.ShardInfo{}
 	owners := []meta.ShardOwner{}
|
@ -111,7 +111,7 @@ type WritePointsRequest struct {
|
||||||
Points []models.Point
|
Points []models.Point
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddPoint adds a point to the WritePointRequest with field name 'value'
|
// AddPoint adds a point to the WritePointRequest with field key 'value'
|
||||||
func (w *WritePointsRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) {
|
func (w *WritePointsRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) {
|
||||||
w.Points = append(w.Points, models.NewPoint(
|
w.Points = append(w.Points, models.NewPoint(
|
||||||
name, tags, map[string]interface{}{"value": value}, timestamp,
|
name, tags, map[string]interface{}{"value": value}, timestamp,
|
||||||
|
|
|
@ -158,7 +158,10 @@ Examples:
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := c.connect(""); err != nil {
|
if err := c.connect(""); err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr,
|
||||||
|
"Failed to connect to %s\nPlease check your connection settings and ensure 'influxd' is running.\n",
|
||||||
|
c.Client.Addr())
|
||||||
|
return
|
||||||
}
|
}
|
||||||
if c.Execute == "" && !c.Import {
|
if c.Execute == "" && !c.Import {
|
||||||
fmt.Printf("Connected to %s version %s\n", c.Client.Addr(), c.Version)
|
fmt.Printf("Connected to %s version %s\n", c.Client.Addr(), c.Version)
|
||||||
|
|
|
@@ -2,7 +2,6 @@ package main

import (
	"encoding/binary"
-	"flag"
	"fmt"
	"io/ioutil"
	"log"
@@ -13,15 +12,9 @@ import (
	"text/tabwriter"

	"github.com/influxdb/influxdb/tsdb"
-	_ "github.com/influxdb/influxdb/tsdb/engine"
)

-func main() {
+func cmdInfo(path string) {
-
-	var path string
-	flag.StringVar(&path, "p", os.Getenv("HOME")+"/.influxdb", "Root storage path. [$HOME/.influxdb]")
-	flag.Parse()
-
	tstore := tsdb.NewStore(filepath.Join(path, "data"))
	tstore.Logger = log.New(ioutil.Discard, "", log.LstdFlags)
	tstore.EngineOptions.Config.Dir = filepath.Join(path, "data")
@@ -38,9 +31,8 @@ func main() {
	}

	// Summary stats
-	fmt.Printf("Shards: %d, Indexes: %d, Databases: %d, Disk Size: %d, Series: %d\n",
+	fmt.Printf("Shards: %d, Indexes: %d, Databases: %d, Disk Size: %d, Series: %d\n\n",
		tstore.ShardN(), tstore.DatabaseIndexN(), len(tstore.Databases()), size, countSeries(tstore))
-	fmt.Println()

	tw := tabwriter.NewWriter(os.Stdout, 16, 8, 0, '\t', 0)

@@ -70,34 +62,14 @@ func main() {
	// Sample a point from each measurement to determine the field types
	for _, shardID := range shardIDs {
		shard := tstore.Shard(shardID)
-		tx, err := shard.ReadOnlyTx()
+		codec := shard.FieldCodec(m.Name)
-		if err != nil {
+		for _, field := range codec.Fields() {
-			fmt.Printf("Failed to get transaction: %v", err)
+			ft := fmt.Sprintf("%s:%s", field.Name, field.Type)
-		}
-
-		for _, key := range series {
-			fieldSummary := []string{}
-			cursor := tx.Cursor(key, m.FieldNames(), shard.FieldCodec(m.Name), true)
-
-			// Series doesn't exist in this shard
-			if cursor == nil {
-				continue
-			}
-
-			// Seek to the beginning
-			_, fields := cursor.SeekTo(0)
-			if fields, ok := fields.(map[string]interface{}); ok {
-				for field, value := range fields {
-					fieldSummary = append(fieldSummary, fmt.Sprintf("%s:%T", field, value))
-				}
-				sort.Strings(fieldSummary)
-
			fmt.Fprintf(tw, "%d\t%s\t%s\t%d/%d\t%d [%s]\t%d\n", shardID, db, m.Name, len(tags), tagValues,
-				len(fields), strings.Join(fieldSummary, ","), len(series))
+				len(fields), ft, len(series))

		}
-		break
-		}
-		tx.Rollback()
	}
}
}
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_inspect/main.go (new file, 87 lines)
@@ -0,0 +1,87 @@
package main

import (
	"flag"
	"fmt"
	"os"

	_ "github.com/influxdb/influxdb/tsdb/engine"
)

func usage() {
	println(`Usage: influx_inspect <command> [options]

    Displays detailed information about InfluxDB data files.
`)

	println(`Commands:
    info - displays series meta-data for all shards. Default location [$HOME/.influxdb]
    dumptsm - dumps low-level details about tsm1 files.`)
	println()
}

func main() {

	flag.Usage = usage
	flag.Parse()

	if len(flag.Args()) == 0 {
		flag.Usage()
		os.Exit(0)
	}

	switch flag.Args()[0] {
	case "info":
		var path string
		fs := flag.NewFlagSet("info", flag.ExitOnError)
		fs.StringVar(&path, "dir", os.Getenv("HOME")+"/.influxdb", "Root storage path. [$HOME/.influxdb]")

		fs.Usage = func() {
			println("Usage: influx_inspect info [options]\n\n    Displays series meta-data for all shards.")
			println()
			println("Options:")
			fs.PrintDefaults()
		}

		if err := fs.Parse(flag.Args()[1:]); err != nil {
			fmt.Printf("%v", err)
			os.Exit(1)
		}
		cmdInfo(path)
	case "dumptsm":
		var dumpAll bool
		opts := &tsdmDumpOpts{}
		fs := flag.NewFlagSet("file", flag.ExitOnError)
		fs.BoolVar(&opts.dumpIndex, "index", false, "Dump raw index data")
		fs.BoolVar(&opts.dumpBlocks, "blocks", false, "Dump raw block data")
		fs.BoolVar(&dumpAll, "all", false, "Dump all data. Caution: This may print a lot of information")
		fs.StringVar(&opts.filterKey, "filter-key", "", "Only display index and block data that match this key substring")

		fs.Usage = func() {
			println("Usage: influx_inspect dumptsm [options] <path>\n\n    Dumps low-level details about tsm1 files.")
			println()
			println("Options:")
			fs.PrintDefaults()
			os.Exit(0)
		}

		if err := fs.Parse(flag.Args()[1:]); err != nil {
			fmt.Printf("%v", err)
			os.Exit(1)
		}

		if len(fs.Args()) == 0 || fs.Args()[0] == "" {
			fmt.Printf("TSM file not specified\n\n")
			fs.Usage()
			fs.PrintDefaults()
			os.Exit(1)
		}
		opts.path = fs.Args()[0]
		opts.dumpBlocks = opts.dumpBlocks || dumpAll || opts.filterKey != ""
		opts.dumpIndex = opts.dumpIndex || dumpAll || opts.filterKey != ""
		cmdDumpTsm1(opts)
	default:
		flag.Usage()
		os.Exit(1)
	}
}
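For orientation, typical invocations of the new tool look like this (the paths are illustrative, not taken from the commit):

	influx_inspect info -dir $HOME/.influxdb
	influx_inspect dumptsm -index /var/lib/influxdb/data/db0/rp0/1/0001.tsm1
	influx_inspect dumptsm -filter-key cpu /var/lib/influxdb/data/db0/rp0/1/0001.tsm1

Note that, per the flag handling above, passing -all or a non-empty -filter-key implicitly turns on both the index and block dumps.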
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_inspect/tsm.go (new file, 427 lines)
@@ -0,0 +1,427 @@
package main

import (
	"encoding/binary"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"text/tabwriter"
	"time"

	"github.com/golang/snappy"
	"github.com/influxdb/influxdb/tsdb"
	"github.com/influxdb/influxdb/tsdb/engine/tsm1"
)

type tsdmDumpOpts struct {
	dumpIndex  bool
	dumpBlocks bool
	filterKey  string
	path       string
}

type tsmIndex struct {
	series  int
	offset  int64
	minTime time.Time
	maxTime time.Time
	blocks  []*block
}

type block struct {
	id     uint64
	offset int64
}

type blockStats struct {
	min, max int
	counts   [][]int
}

func (b *blockStats) inc(typ int, enc byte) {
	for len(b.counts) <= typ {
		b.counts = append(b.counts, []int{})
	}
	for len(b.counts[typ]) <= int(enc) {
		b.counts[typ] = append(b.counts[typ], 0)
	}
	b.counts[typ][enc] += 1
}

func (b *blockStats) size(sz int) {
	if b.min == 0 || sz < b.min {
		b.min = sz
	}
	if b.max == 0 || sz > b.max {
		b.max = sz
	}
}

var (
	fieldType = []string{
		"timestamp", "float", "int", "bool", "string",
	}
	blockTypes = []string{
		"float64", "int64", "bool", "string",
	}
	timeEnc = []string{
		"none", "s8b", "rle",
	}
	floatEnc = []string{
		"none", "gor",
	}
	intEnc = []string{
		"none", "s8b", "rle",
	}
	boolEnc = []string{
		"none", "bp",
	}
	stringEnc = []string{
		"none", "snpy",
	}
	encDescs = [][]string{
		timeEnc, floatEnc, intEnc, boolEnc, stringEnc,
	}
)

func readFields(path string) (map[string]*tsdb.MeasurementFields, error) {
	fields := make(map[string]*tsdb.MeasurementFields)

	f, err := os.OpenFile(filepath.Join(path, tsm1.FieldsFileExtension), os.O_RDONLY, 0666)
	if os.IsNotExist(err) {
		return fields, nil
	} else if err != nil {
		return nil, err
	}
	b, err := ioutil.ReadAll(f)
	if err != nil {
		return nil, err
	}

	data, err := snappy.Decode(nil, b)
	if err != nil {
		return nil, err
	}

	if err := json.Unmarshal(data, &fields); err != nil {
		return nil, err
	}
	return fields, nil
}

func readSeries(path string) (map[string]*tsdb.Series, error) {
	series := make(map[string]*tsdb.Series)

	f, err := os.OpenFile(filepath.Join(path, tsm1.SeriesFileExtension), os.O_RDONLY, 0666)
	if os.IsNotExist(err) {
		return series, nil
	} else if err != nil {
		return nil, err
	}
	defer f.Close()
	b, err := ioutil.ReadAll(f)
	if err != nil {
		return nil, err
	}

	data, err := snappy.Decode(nil, b)
	if err != nil {
		return nil, err
	}

	if err := json.Unmarshal(data, &series); err != nil {
		return nil, err
	}

	return series, nil
}

func readIds(path string) (map[string]uint64, error) {
	f, err := os.OpenFile(filepath.Join(path, tsm1.IDsFileExtension), os.O_RDONLY, 0666)
	if os.IsNotExist(err) {
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	b, err := ioutil.ReadAll(f)
	if err != nil {
		return nil, err
	}

	b, err = snappy.Decode(nil, b)
	if err != nil {
		return nil, err
	}

	ids := make(map[string]uint64)
	if b != nil {
		if err := json.Unmarshal(b, &ids); err != nil {
			return nil, err
		}
	}
	return ids, err
}

func readIndex(f *os.File) *tsmIndex {
	// Get the file size
	stat, err := f.Stat()
	if err != nil {
		panic(err.Error())
	}

	// Seek to the series count
	f.Seek(-4, os.SEEK_END)
	b := make([]byte, 8)
	_, err = f.Read(b[:4])
	if err != nil {
		fmt.Printf("error: %v\n", err.Error())
		os.Exit(1)
	}

	seriesCount := binary.BigEndian.Uint32(b)

	// Get the min time
	f.Seek(-20, os.SEEK_END)
	f.Read(b)
	minTime := time.Unix(0, int64(btou64(b)))

	// Get max time
	f.Seek(-12, os.SEEK_END)
	f.Read(b)
	maxTime := time.Unix(0, int64(btou64(b)))

	// Figure out where the index starts
	indexStart := stat.Size() - int64(seriesCount*12+20)

	// Seek to the start of the index
	f.Seek(indexStart, os.SEEK_SET)
	count := int(seriesCount)
	index := &tsmIndex{
		offset:  indexStart,
		minTime: minTime,
		maxTime: maxTime,
		series:  count,
	}

	// Read the index entries
	for i := 0; i < count; i++ {
		f.Read(b)
		id := binary.BigEndian.Uint64(b)
		f.Read(b[:4])
		pos := binary.BigEndian.Uint32(b[:4])
		index.blocks = append(index.blocks, &block{id: id, offset: int64(pos)})
	}

	return index
}

func cmdDumpTsm1(opts *tsdmDumpOpts) {
	var errors []error

	f, err := os.Open(opts.path)
	if err != nil {
		println(err.Error())
		os.Exit(1)
	}

	// Get the file size
	stat, err := f.Stat()
	if err != nil {
		println(err.Error())
		os.Exit(1)
	}

	b := make([]byte, 8)
	f.Read(b[:4])

	// Verify magic number
	if binary.BigEndian.Uint32(b[:4]) != 0x16D116D1 {
		println("Not a tsm1 file.")
		os.Exit(1)
	}

	ids, err := readIds(filepath.Dir(opts.path))
	if err != nil {
		println("Failed to read series:", err.Error())
		os.Exit(1)
	}

	invIds := map[uint64]string{}
	for k, v := range ids {
		invIds[v] = k
	}

	index := readIndex(f)
	blockStats := &blockStats{}

	println("Summary:")
	fmt.Printf("  File: %s\n", opts.path)
	fmt.Printf("  Time Range: %s - %s\n",
		index.minTime.UTC().Format(time.RFC3339Nano),
		index.maxTime.UTC().Format(time.RFC3339Nano),
	)
	fmt.Printf("  Duration: %s ", index.maxTime.Sub(index.minTime))
	fmt.Printf("  Series: %d ", index.series)
	fmt.Printf("  File Size: %d\n", stat.Size())
	println()

	tw := tabwriter.NewWriter(os.Stdout, 8, 8, 1, '\t', 0)
	fmt.Fprintln(tw, "  "+strings.Join([]string{"Pos", "ID", "Ofs", "Key", "Field"}, "\t"))
	for i, block := range index.blocks {
		key := invIds[block.id]
		split := strings.Split(key, "#!~#")

		// We don't know if we have fields so use an informative default
		var measurement, field string = "UNKNOWN", "UNKNOWN"

		// We read some IDs from the ids file
		if len(invIds) > 0 {
			// Change the default to error until we know we have a valid key
			measurement = "ERR"
			field = "ERR"

			// Possible corruption? Try to read as much as we can and point to the problem.
			if key == "" {
				errors = append(errors, fmt.Errorf("index pos %d, field id: %d, missing key for id.", i, block.id))
			} else if len(split) < 2 {
				errors = append(errors, fmt.Errorf("index pos %d, field id: %d, key corrupt: got '%v'", i, block.id, key))
			} else {
				measurement = split[0]
				field = split[1]
			}
		}

		if opts.filterKey != "" && !strings.Contains(key, opts.filterKey) {
			continue
		}
		fmt.Fprintln(tw, "  "+strings.Join([]string{
			strconv.FormatInt(int64(i), 10),
			strconv.FormatUint(block.id, 10),
			strconv.FormatInt(int64(block.offset), 10),
			measurement,
			field,
		}, "\t"))
	}

	if opts.dumpIndex {
		println("Index:")
		tw.Flush()
		println()
	}

	tw = tabwriter.NewWriter(os.Stdout, 8, 8, 1, '\t', 0)
	fmt.Fprintln(tw, "  "+strings.Join([]string{"Blk", "Ofs", "Len", "ID", "Type", "Min Time", "Points", "Enc [T/V]", "Len [T/V]"}, "\t"))

	// Starting at 4 because the magic number is 4 bytes
	i := int64(4)
	var blockCount, pointCount, blockSize int64
	indexSize := stat.Size() - index.offset

	// Start at the beginning and read every block
	for i < index.offset {
		f.Seek(int64(i), 0)

		f.Read(b)
		id := btou64(b)
		f.Read(b[:4])
		length := binary.BigEndian.Uint32(b[:4])
		buf := make([]byte, length)
		f.Read(buf)

		blockSize += int64(len(buf)) + 12

		startTime := time.Unix(0, int64(btou64(buf[:8])))
		blockType := buf[8]

		encoded := buf[9:]

		v, err := tsm1.DecodeBlock(buf)
		if err != nil {
			fmt.Printf("error: %v\n", err.Error())
			os.Exit(1)
		}

		pointCount += int64(len(v))

		// Length of the timestamp block
		tsLen, j := binary.Uvarint(encoded)

		// Unpack the timestamp bytes
		ts := encoded[int(j) : int(j)+int(tsLen)]

		// Unpack the value bytes
		values := encoded[int(j)+int(tsLen):]

		tsEncoding := timeEnc[int(ts[0]>>4)]
		vEncoding := encDescs[int(blockType+1)][values[0]>>4]

		typeDesc := blockTypes[blockType]

		blockStats.inc(0, ts[0]>>4)
		blockStats.inc(int(blockType+1), values[0]>>4)
		blockStats.size(len(buf))

		if opts.filterKey != "" && !strings.Contains(invIds[id], opts.filterKey) {
			i += (12 + int64(length))
			blockCount += 1
			continue
		}

		fmt.Fprintln(tw, "  "+strings.Join([]string{
			strconv.FormatInt(blockCount, 10),
			strconv.FormatInt(i, 10),
			strconv.FormatInt(int64(len(buf)), 10),
			strconv.FormatUint(id, 10),
			typeDesc,
			startTime.UTC().Format(time.RFC3339Nano),
			strconv.FormatInt(int64(len(v)), 10),
			fmt.Sprintf("%s/%s", tsEncoding, vEncoding),
			fmt.Sprintf("%d/%d", len(ts), len(values)),
		}, "\t"))

		i += (12 + int64(length))
		blockCount += 1
	}
	if opts.dumpBlocks {
		println("Blocks:")
		tw.Flush()
		println()
	}

	fmt.Printf("Statistics\n")
	fmt.Printf("  Blocks:\n")
	fmt.Printf("    Total: %d Size: %d Min: %d Max: %d Avg: %d\n",
		blockCount, blockSize, blockStats.min, blockStats.max, blockSize/blockCount)
	fmt.Printf("  Index:\n")
	fmt.Printf("    Total: %d Size: %d\n", len(index.blocks), indexSize)
	fmt.Printf("  Points:\n")
	fmt.Printf("    Total: %d", pointCount)
	println()

	println("  Encoding:")
	for i, counts := range blockStats.counts {
		if len(counts) == 0 {
			continue
		}
		fmt.Printf("    %s: ", strings.Title(fieldType[i]))
		for j, v := range counts {
			fmt.Printf("\t%s: %d (%d%%) ", encDescs[i][j], v, int(float64(v)/float64(blockCount)*100))
		}
		println()
	}
	fmt.Printf("  Compression:\n")
	fmt.Printf("    Per block: %0.2f bytes/point\n", float64(blockSize)/float64(pointCount))
	fmt.Printf("    Total: %0.2f bytes/point\n", float64(stat.Size())/float64(pointCount))

	if len(errors) > 0 {
		println()
		fmt.Printf("Errors (%d):\n", len(errors))
		for _, err := range errors {
			fmt.Printf("  * %v\n", err)
		}
		println()
	}
}
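The magic seek offsets in readIndex (-4, -12, -20) encode the tsm1 footer layout: the file ends with an 8-byte min time, an 8-byte max time, and a 4-byte series count, preceded by 12-byte index entries (an 8-byte series ID plus a 4-byte block offset each). A small self-contained sketch of that arithmetic, with constant names of our own, not the tsm1 package's:

	package main

	import "fmt"

	const (
		footerLen     = 20 // 8-byte min time + 8-byte max time + 4-byte series count
		indexEntryLen = 12 // 8-byte series ID + 4-byte block offset
	)

	// indexStart mirrors readIndex above: the index sits directly before
	// the footer, one fixed-size entry per series.
	func indexStart(fileSize int64, seriesCount uint32) int64 {
		return fileSize - int64(seriesCount)*indexEntryLen - footerLen
	}

	func main() {
		fmt.Println(indexStart(1<<20, 100)) // 1048576 - 1200 - 20 = 1047356
	}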
@@ -13,7 +13,7 @@ channel_buffer_size = 100000
[[series]]
tick = "1ns"
jitter = true
-point_count = 100000 # number of points that will be written for each of the series
+point_count = 1000000 # number of points that will be written for each of the series
measurement = "cpu"
series_count = 100000
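A note on scale: with point_count = 1000000 and series_count = 100000, a full run of this config now targets 1,000,000 × 100,000 = 10^11 points, up from 10^10 under the old default.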
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/influx_stress.go (57 lines changed)
@@ -11,23 +11,15 @@ import (
)

var (
-	batchSize     = flag.Int("batchsize", 5000, "number of points per batch")
-	seriesCount   = flag.Int("series", 100000, "number of unique series to create")
-	pointCount    = flag.Int("points", 100, "number of points per series to create")
-	concurrency   = flag.Int("concurrency", 10, "number of simultaneous writes to run")
+	batchSize     = flag.Int("batchsize", 0, "number of points per batch")
+	concurrency   = flag.Int("concurrency", 0, "number of simultaneous writes to run")
	batchInterval = flag.Duration("batchinterval", 0*time.Second, "duration between batches")
-	database      = flag.String("database", "stress", "name of database")
-	address       = flag.String("addr", "localhost:8086", "IP address and port of database (e.g., localhost:8086)")
-	precision     = flag.String("precision", "n", "The precision that points in the database will be with")
+	database      = flag.String("database", "", "name of database")
+	address       = flag.String("addr", "", "IP address and port of database (e.g., localhost:8086)")
+	precision     = flag.String("precision", "", "The precision that points in the database will be with")
	test          = flag.String("test", "", "The stress test file")
)

-var ms runner.Measurements
-
-func init() {
-	flag.Var(&ms, "m", "comma-separated list of intervals to use between events")
-}
-
func main() {
	var cfg *runner.Config
	var err error
@@ -35,26 +27,45 @@ func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	flag.Parse()

-	cfg = runner.NewConfig()
-
-	if len(ms) == 0 {
-		ms = append(ms, "cpu")
-	}
-
-	for _, m := range ms {
-		cfg.Series = append(cfg.Series, runner.NewSeries(m, 100, 100000))
-	}
-
-	if *test != "" {
-		cfg, err = runner.DecodeFile(*test)
-		if err != nil {
-			fmt.Println(err)
-			return
-		}
-	}
+	if *test == "" {
+		fmt.Println("'-test' flag is required")
+		return
+	}
+
+	cfg, err = runner.DecodeFile(*test)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+
+	fmt.Printf("%#v\n", cfg.Write)
+
+	if *batchSize != 0 {
+		cfg.Write.BatchSize = *batchSize
+	}
+
+	if *concurrency != 0 {
+		cfg.Write.Concurrency = *concurrency
+	}
+
+	if *batchInterval != 0*time.Second {
+		cfg.Write.BatchInterval = batchInterval.String()
+	}
+
+	if *database != "" {
+		cfg.Write.Database = *database
+	}
+
+	if *address != "" {
+		cfg.Write.Address = *address
+	}
+
+	if *precision != "" {
+		cfg.Write.Precision = *precision
+	}
+
+	fmt.Printf("%#v\n", cfg.Write)

	d := make(chan struct{})
	seriesQueryResults := make(chan runner.QueryResults)
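The override scheme above leans on zero values: every flag defaults to 0 or "", so a flag only overrides the decoded test file when the user actually set it. One consequence is that an explicit "-batchsize 0" is indistinguishable from leaving the flag alone. A sketch of an alternative that tells the two apart, using flag.Visit (our illustration, not something the commit does):

	package main

	import (
		"flag"
		"fmt"
	)

	var batchSize = flag.Int("batchsize", 0, "number of points per batch")

	func main() {
		flag.Parse()

		// flag.Visit only walks flags the user actually set, so an explicit
		// "-batchsize 0" can be told apart from the flag being left alone.
		set := map[string]bool{}
		flag.Visit(func(f *flag.Flag) { set[f.Name] = true })
		fmt.Println("batchsize explicitly set:", set["batchsize"])
	}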
@@ -23,10 +23,13 @@ import (
	"github.com/influxdb/influxdb/services/opentsdb"
	"github.com/influxdb/influxdb/services/precreator"
	"github.com/influxdb/influxdb/services/retention"
+	"github.com/influxdb/influxdb/services/subscriber"
	"github.com/influxdb/influxdb/services/udp"
	"github.com/influxdb/influxdb/tsdb"
)

+const DefaultEnterpriseURL = "https://enterprise.influxdata.com"
+
// Config represents the configuration format for the influxd binary.
type Config struct {
	Meta *meta.Config `toml:"meta"`
@@ -37,6 +40,7 @@ type Config struct {

	Admin      admin.Config      `toml:"admin"`
	Monitor    monitor.Config    `toml:"monitor"`
+	Subscriber subscriber.Config `toml:"subscriber"`
	HTTPD      httpd.Config      `toml:"http"`
	Graphites  []graphite.Config `toml:"graphite"`
	Collectd   collectd.Config   `toml:"collectd"`
@@ -50,11 +54,16 @@ type Config struct {

	// Server reporting
	ReportingDisabled bool `toml:"reporting-disabled"`

+	// Server registration
+	EnterpriseURL   string `toml:"enterprise-url"`
+	EnterpriseToken string `toml:"enterprise-token"`
}

// NewConfig returns an instance of Config with reasonable defaults.
func NewConfig() *Config {
	c := &Config{}
+	c.EnterpriseURL = DefaultEnterpriseURL
	c.Meta = meta.NewConfig()
	c.Data = tsdb.NewConfig()
	c.Cluster = cluster.NewConfig()
@@ -62,6 +71,7 @@ func NewConfig() *Config {

	c.Admin = admin.NewConfig()
	c.Monitor = monitor.NewConfig()
+	c.Subscriber = subscriber.NewConfig()
	c.HTTPD = httpd.NewConfig()
	c.Collectd = collectd.NewConfig()
	c.OpenTSDB = opentsdb.NewConfig()
@@ -141,7 +151,7 @@ func (c *Config) applyEnvOverrides(prefix string, spec reflect.Value) error {
	configName := typeOfSpec.Field(i).Tag.Get("toml")
	// Replace hyphens with underscores to avoid issues with shells
	configName = strings.Replace(configName, "-", "_", -1)
-	fieldName := typeOfSpec.Field(i).Name
+	fieldKey := typeOfSpec.Field(i).Name

	// Skip any fields that we cannot set
	if f.CanSet() || f.Kind() == reflect.Slice {
@@ -188,14 +198,14 @@ func (c *Config) applyEnvOverrides(prefix string, spec reflect.Value) error {
	if f.Type().Name() == "Duration" {
		dur, err := time.ParseDuration(value)
		if err != nil {
-			return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldName, f.Type().String(), value)
+			return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldKey, f.Type().String(), value)
		}
		intValue = dur.Nanoseconds()
	} else {
		var err error
		intValue, err = strconv.ParseInt(value, 0, f.Type().Bits())
		if err != nil {
-			return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldName, f.Type().String(), value)
+			return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldKey, f.Type().String(), value)
		}
	}
@@ -203,14 +213,14 @@ func (c *Config) applyEnvOverrides(prefix string, spec reflect.Value) error {
	case reflect.Bool:
		boolValue, err := strconv.ParseBool(value)
		if err != nil {
-			return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldName, f.Type().String(), value)
+			return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldKey, f.Type().String(), value)
		}
		f.SetBool(boolValue)
	case reflect.Float32, reflect.Float64:
		floatValue, err := strconv.ParseFloat(value, f.Type().Bits())
		if err != nil {
-			return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldName, f.Type().String(), value)
+			return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldKey, f.Type().String(), value)
		}
		f.SetFloat(floatValue)
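The applyEnvOverrides behavior pairs with the new enterprise-token setting: a field's toml tag has its hyphens replaced with underscores, is upper-cased, and is prefixed, which is how INFLUXDB_ENTERPRISE_TOKEN in the test below lands on EnterpriseToken. A minimal sketch of that key derivation (the helper name is ours, not the package's):

	package main

	import (
		"fmt"
		"strings"
	)

	// envKey derives the environment variable consulted for a config field:
	// prefix plus toml tag, upper-cased, hyphens replaced by underscores.
	func envKey(prefix, tomlTag string) string {
		return strings.ToUpper(prefix + "_" + strings.Replace(tomlTag, "-", "_", -1))
	}

	func main() {
		fmt.Println(envKey("INFLUXDB", "enterprise-token")) // INFLUXDB_ENTERPRISE_TOKEN
	}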
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_test.go (21 lines changed)
@@ -13,6 +13,8 @@ func TestConfig_Parse(t *testing.T) {
	// Parse configuration.
	var c run.Config
	if _, err := toml.Decode(`
+enterprise-token = "deadbeef"
+
[meta]
dir = "/tmp/meta"

@@ -45,6 +47,9 @@ bind-address = ":4444"
[monitoring]
enabled = true

+[subscriber]
+enabled = true
+
[continuous_queries]
enabled = true
`, &c); err != nil {
@@ -52,7 +57,9 @@ enabled = true
	}

	// Validate configuration.
-	if c.Meta.Dir != "/tmp/meta" {
+	if c.EnterpriseToken != "deadbeef" {
+		t.Fatalf("unexpected Enterprise token: %s", c.EnterpriseToken)
+	} else if c.Meta.Dir != "/tmp/meta" {
		t.Fatalf("unexpected meta dir: %s", c.Meta.Dir)
	} else if c.Data.Dir != "/tmp/data" {
		t.Fatalf("unexpected data dir: %s", c.Data.Dir)
@@ -72,6 +79,8 @@ enabled = true
		t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDB.BindAddress)
	} else if c.UDPs[0].BindAddress != ":4444" {
		t.Fatalf("unexpected udp bind address: %s", c.UDPs[0].BindAddress)
+	} else if c.Subscriber.Enabled != true {
+		t.Fatalf("unexpected subscriber enabled: %v", c.Subscriber.Enabled)
	} else if c.ContinuousQuery.Enabled != true {
		t.Fatalf("unexpected continuous query enabled: %v", c.ContinuousQuery.Enabled)
	}
@@ -82,6 +91,8 @@ func TestConfig_Parse_EnvOverride(t *testing.T) {
	// Parse configuration.
	var c run.Config
	if _, err := toml.Decode(`
+enterprise-token = "deadbeef"
+
[meta]
dir = "/tmp/meta"

@@ -120,6 +131,10 @@ enabled = true
		t.Fatal(err)
	}

+	if err := os.Setenv("INFLUXDB_ENTERPRISE_TOKEN", "wheresthebeef"); err != nil {
+		t.Fatalf("failed to set env var: %v", err)
+	}
+
	if err := os.Setenv("INFLUXDB_UDP_BIND_ADDRESS", ":1234"); err != nil {
		t.Fatalf("failed to set env var: %v", err)
	}
@@ -132,6 +147,10 @@ enabled = true
		t.Fatalf("failed to apply env overrides: %v", err)
	}

+	if c.EnterpriseToken != "wheresthebeef" {
+		t.Fatalf("unexpected Enterprise token: %s", c.EnterpriseToken)
+	}
+
	if c.UDPs[0].BindAddress != ":4444" {
		t.Fatalf("unexpected udp bind address: %s", c.UDPs[0].BindAddress)
	}
@@ -2,7 +2,9 @@ package run

import (
	"bytes"
+	"encoding/json"
	"fmt"
+	"io/ioutil"
	"log"
	"net"
	"net/http"
@@ -26,6 +28,7 @@ import (
	"github.com/influxdb/influxdb/services/precreator"
	"github.com/influxdb/influxdb/services/retention"
	"github.com/influxdb/influxdb/services/snapshotter"
+	"github.com/influxdb/influxdb/services/subscriber"
	"github.com/influxdb/influxdb/services/udp"
	"github.com/influxdb/influxdb/tcp"
	"github.com/influxdb/influxdb/tsdb"
@@ -60,6 +63,7 @@ type Server struct {
	ShardWriter   *cluster.ShardWriter
	ShardMapper   *cluster.ShardMapper
	HintedHandoff *hh.Service
+	Subscriber    *subscriber.Service

	Services []Service

@@ -70,8 +74,10 @@ type Server struct {

	Monitor *monitor.Monitor

-	// Server reporting
+	// Server reporting and registration
	reportingDisabled bool
+	enterpriseURL     string
+	enterpriseToken   string

	// Profiling
	CPUProfile string
@@ -98,6 +104,8 @@ func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
		Monitor: monitor.New(c.Monitor),

		reportingDisabled: c.ReportingDisabled,
+		enterpriseURL:     c.EnterpriseURL,
+		enterpriseToken:   c.EnterpriseToken,
	}

	// Copy TSDB configuration.
@@ -125,7 +133,11 @@ func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
	s.ShardWriter.MetaStore = s.MetaStore

	// Create the hinted handoff service
-	s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter)
+	s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter, s.MetaStore)
+
+	// Create the Subscriber service
+	s.Subscriber = subscriber.NewService(c.Subscriber)
+	s.Subscriber.MetaStore = s.MetaStore

	// Initialize points writer.
	s.PointsWriter = cluster.NewPointsWriter()
@@ -134,6 +146,10 @@ func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {
	s.PointsWriter.TSDBStore = s.TSDBStore
	s.PointsWriter.ShardWriter = s.ShardWriter
	s.PointsWriter.HintedHandoff = s.HintedHandoff
+	s.PointsWriter.Subscriber = s.Subscriber
+
+	// needed for executing INTO queries.
+	s.QueryExecutor.IntoWriter = s.PointsWriter

	// Initialize the monitor
	s.Monitor.Version = s.buildInfo.Version
@@ -289,6 +305,7 @@ func (s *Server) appendUDPService(c udp.Config) {
	}
	srv := udp.NewService(c)
	srv.PointsWriter = s.PointsWriter
+	srv.MetaStore = s.MetaStore
	s.Services = append(s.Services, srv)
}

@@ -299,7 +316,6 @@ func (s *Server) appendContinuousQueryService(c continuous_querier.Config) {
	srv := continuous_querier.NewService(c)
	srv.MetaStore = s.MetaStore
	srv.QueryExecutor = s.QueryExecutor
-	srv.PointsWriter = s.PointsWriter
	s.Services = append(s.Services, srv)
}

@@ -371,6 +387,11 @@ func (s *Server) Open() error {
		return fmt.Errorf("open hinted handoff: %s", err)
	}

+	// Open the subscriber service
+	if err := s.Subscriber.Open(); err != nil {
+		return fmt.Errorf("open subscriber: %s", err)
+	}
+
	for _, service := range s.Services {
		if err := service.Open(); err != nil {
			return fmt.Errorf("open service: %s", err)
@@ -382,6 +403,11 @@ func (s *Server) Open() error {
		go s.startServerReporting()
	}

+	// Register server
+	if err := s.registerServer(); err != nil {
+		log.Printf("failed to register server: %s", err.Error())
+	}
+
	return nil
	}(); err != nil {
@@ -420,6 +446,10 @@ func (s *Server) Close() error {
		s.TSDBStore.Close()
	}

+	if s.Subscriber != nil {
+		s.Subscriber.Close()
+	}
+
	// Finally close the meta-store since everything else depends on it
	if s.MetaStore != nil {
		s.MetaStore.Close()
@@ -489,6 +519,59 @@ func (s *Server) reportServer() {
	go client.Post("http://m.influxdb.com:8086/db/reporting/series?u=reporter&p=influxdb", "application/json", data)
}

+// registerServer registers the server on start-up.
+func (s *Server) registerServer() error {
+	if s.enterpriseToken == "" {
+		return nil
+	}
+
+	clusterID, err := s.MetaStore.ClusterID()
+	if err != nil {
+		log.Printf("failed to retrieve cluster ID for registration: %s", err.Error())
+		return err
+	}
+	hostname, err := os.Hostname()
+	if err != nil {
+		return err
+	}
+	j := map[string]interface{}{
+		"cluster_id": fmt.Sprintf("%d", clusterID),
+		"server_id":  fmt.Sprintf("%d", s.MetaStore.NodeID()),
+		"host":       hostname,
+		"product":    "influxdb",
+		"version":    s.buildInfo.Version,
+	}
+	b, err := json.Marshal(j)
+	if err != nil {
+		return err
+	}
+
+	url := fmt.Sprintf("%s/api/v1/servers?token=%s", s.enterpriseURL, s.enterpriseToken)
+	go func() {
+		client := http.Client{Timeout: time.Duration(5 * time.Second)}
+		resp, err := client.Post(url, "application/json", bytes.NewBuffer(b))
+		if err != nil {
+			log.Printf("failed to register server with %s: %s", s.enterpriseURL, err.Error())
+			return
+		}
+		defer resp.Body.Close()
+
+		if resp.StatusCode == http.StatusCreated {
+			return
+		}
+
+		body, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			log.Printf("failed to read response from registration server: %s", err.Error())
+			return
+		}
+		log.Printf("failed to register server with %s: received code %s, body: %s", s.enterpriseURL, resp.Status, string(body))
+	}()
+	return nil
+}
+
// monitorErrorChan reads an error channel and resends it through the server.
func (s *Server) monitorErrorChan(ch <-chan error) {
	for {
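Since registerServer fires its POST from a goroutine and only logs failures, exercising it in a test means standing in for the Enterprise endpoint. A rough sketch with net/http/httptest (the wiring and names are illustrative, not from the repo's test suite):

	package run_test

	import (
		"encoding/json"
		"net/http"
		"net/http/httptest"
		"testing"
	)

	// Sketch: a fake registration endpoint that records the payload
	// registerServer would send. Wiring it into a Server is elided.
	func TestRegisterServer_Payload(t *testing.T) {
		got := make(chan map[string]interface{}, 1)
		ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			var j map[string]interface{}
			if err := json.NewDecoder(r.Body).Decode(&j); err != nil {
				t.Errorf("bad payload: %v", err)
			}
			got <- j
			w.WriteHeader(http.StatusCreated) // what registerServer treats as success
		}))
		defer ts.Close()

		// Point a server's enterpriseURL at ts.URL, call registerServer,
		// then assert on <-got (cluster_id, server_id, host, product, version).
	}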
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.go (272 lines changed)
@ -66,18 +66,43 @@ func TestServer_DatabaseCommands(t *testing.T) {
|
||||||
command: `SHOW DATABASES`,
|
command: `SHOW DATABASES`,
|
||||||
exp: `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db0"],["db1"]]}]}]}`,
|
exp: `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db0"],["db1"]]}]}]}`,
|
||||||
},
|
},
|
||||||
|
&Query{
|
||||||
|
name: "rename database should succeed",
|
||||||
|
command: `ALTER DATABASE db1 RENAME TO db2`,
|
||||||
|
exp: `{"results":[{}]}`,
|
||||||
|
},
|
||||||
|
&Query{
|
||||||
|
name: "show databases should reflect change of name",
|
||||||
|
command: `SHOW DATABASES`,
|
||||||
|
exp: `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db0"],["db2"]]}]}]}`,
|
||||||
|
},
|
||||||
|
&Query{
|
||||||
|
name: "rename non-existent database should fail",
|
||||||
|
command: `ALTER DATABASE db4 RENAME TO db5`,
|
||||||
|
exp: `{"results":[{"error":"database not found"}]}`,
|
||||||
|
},
|
||||||
|
&Query{
|
||||||
|
name: "rename database to illegal name should fail",
|
||||||
|
command: `ALTER DATABASE db2 RENAME TO 0xdb0`,
|
||||||
|
exp: `{"error":"error parsing query: found 0, expected identifier at line 1, char 30"}`,
|
||||||
|
},
|
||||||
|
&Query{
|
||||||
|
name: "rename database to already existing datbase should fail",
|
||||||
|
command: `ALTER DATABASE db2 RENAME TO db0`,
|
||||||
|
exp: `{"results":[{"error":"database already exists"}]}`,
|
||||||
|
},
|
||||||
&Query{
|
&Query{
|
||||||
name: "drop database db0 should succeed",
|
name: "drop database db0 should succeed",
|
||||||
command: `DROP DATABASE db0`,
|
command: `DROP DATABASE db0`,
|
||||||
exp: `{"results":[{}]}`,
|
exp: `{"results":[{}]}`,
|
||||||
},
|
},
|
||||||
&Query{
|
&Query{
|
||||||
name: "drop database db1 should succeed",
|
name: "drop database db2 should succeed",
|
||||||
command: `DROP DATABASE db1`,
|
command: `DROP DATABASE db2`,
|
||||||
exp: `{"results":[{}]}`,
|
exp: `{"results":[{}]}`,
|
||||||
},
|
},
|
||||||
&Query{
|
&Query{
|
||||||
name: "show database should have no results",
|
name: "show databases should have no results after dropping all databases",
|
||||||
command: `SHOW DATABASES`,
|
command: `SHOW DATABASES`,
|
||||||
exp: `{"results":[{"series":[{"name":"databases","columns":["name"]}]}]}`,
|
exp: `{"results":[{"series":[{"name":"databases","columns":["name"]}]}]}`,
|
||||||
},
|
},
|
||||||
|
@ -241,6 +266,96 @@ func TestServer_Query_DropDatabaseIsolated(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestServer_Query_RenameDatabase(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
s := OpenServer(NewConfig(), "")
|
||||||
|
defer s.Close()
|
||||||
|
|
||||||
|
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
writes := []string{
|
||||||
|
fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
|
||||||
|
}
|
||||||
|
|
||||||
|
test := NewTest("db0", "rp0")
|
||||||
|
test.write = strings.Join(writes, "\n")
|
||||||
|
|
||||||
|
test.addQueries([]*Query{
|
||||||
|
&Query{
|
||||||
|
name: "Query data from db0 database",
|
||||||
|
command: `SELECT * FROM cpu`,
|
||||||
|
exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`,
|
||||||
|
params: url.Values{"db": []string{"db0"}},
|
||||||
|
},
|
||||||
|
&Query{
|
||||||
|
name: "Query data from db0 database with GROUP BY *",
|
||||||
|
command: `SELECT * FROM cpu GROUP BY *`,
|
||||||
|
exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"serverA","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:00Z",23.2]]}]}]}`,
|
||||||
|
params: url.Values{"db": []string{"db0"}},
|
||||||
|
},
|
||||||
|
&Query{
|
||||||
|
name: "Create continuous query using db0",
|
||||||
|
command: `CREATE CONTINUOUS QUERY "cq1" ON db0 BEGIN SELECT count(value) INTO "rp1".:MEASUREMENT FROM cpu GROUP BY time(5s) END`,
|
||||||
|
exp: `{"results":[{}]}`,
|
||||||
|
},
|
||||||
|
&Query{
|
||||||
|
name: "Rename database should fail because of conflicting CQ",
|
||||||
|
command: `ALTER DATABASE db0 RENAME TO db1`,
|
||||||
|
exp: `{"results":[{"error":"database rename conflict with existing continuous query"}]}`,
|
||||||
|
},
|
||||||
|
&Query{
|
||||||
|
name: "Drop conflicting CQ",
|
||||||
|
command: `DROP CONTINUOUS QUERY "cq1" on db0`,
|
||||||
|
exp: `{"results":[{}]}`,
|
||||||
|
},
|
||||||
|
&Query{
|
||||||
|
name: "Rename database should succeed now",
|
||||||
|
command: `ALTER DATABASE db0 RENAME TO db1`,
|
||||||
|
exp: `{"results":[{}]}`,
|
||||||
|
},
|
||||||
|
&Query{
|
||||||
|
name: "Query data from db0 database and ensure it's gone",
|
||||||
|
command: `SELECT * FROM cpu`,
|
||||||
|
exp: `{"results":[{"error":"database not found: db0"}]}`,
|
||||||
|
params: url.Values{"db": []string{"db0"}},
|
||||||
|
},
|
||||||
|
&Query{
|
||||||
|
name: "Query data from now renamed database db1 and ensure that's there",
|
||||||
|
command: `SELECT * FROM cpu`,
|
||||||
|
exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`,
|
||||||
|
params: url.Values{"db": []string{"db1"}},
|
||||||
|
},
|
||||||
|
&Query{
|
||||||
|
name: "Query data from now renamed database db1 and ensure it's still there with GROUP BY *",
|
||||||
|
command: `SELECT * FROM cpu GROUP BY *`,
|
||||||
|
exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"serverA","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:00Z",23.2]]}]}]}`,
|
||||||
|
params: url.Values{"db": []string{"db1"}},
|
||||||
|
},
|
||||||
|
}...)
|
||||||
|
|
||||||
|
for i, query := range test.queries {
|
||||||
|
if i == 0 {
|
||||||
|
if err := test.init(s); err != nil {
|
||||||
|
t.Fatalf("test init failed: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if query.skip {
|
||||||
|
t.Logf("SKIP:: %s", query.name)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := query.Execute(s); err != nil {
|
||||||
|
t.Error(query.Error(err))
|
||||||
|
} else if !query.success() {
|
||||||
|
t.Error(query.failureMessage())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestServer_Query_DropAndRecreateSeries(t *testing.T) {
|
func TestServer_Query_DropAndRecreateSeries(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
s := OpenServer(NewConfig(), "")
|
s := OpenServer(NewConfig(), "")
|
||||||
|
@ -382,6 +497,24 @@ func TestServer_Query_DropSeriesFromRegex(t *testing.T) {
|
||||||
exp: `{"results":[{"series":[{"name":"b","columns":["_key","host","region"],"values":[["b,host=serverA,region=uswest","serverA","uswest"]]},{"name":"c","columns":["_key","host","region"],"values":[["c,host=serverA,region=uswest","serverA","uswest"]]}]}]}`,
|
exp: `{"results":[{"series":[{"name":"b","columns":["_key","host","region"],"values":[["b,host=serverA,region=uswest","serverA","uswest"]]},{"name":"c","columns":["_key","host","region"],"values":[["c,host=serverA,region=uswest","serverA","uswest"]]}]}]}`,
|
||||||
params: url.Values{"db": []string{"db0"}},
|
params: url.Values{"db": []string{"db0"}},
|
||||||
},
|
},
|
||||||
|
&Query{
|
||||||
|
name: "Drop series with WHERE field should error",
|
||||||
|
command: `DROP SERIES FROM c WHERE val > 50.0`,
|
||||||
|
exp: `{"results":[{"error":"DROP SERIES doesn't support fields in WHERE clause"}]}`,
|
||||||
|
params: url.Values{"db": []string{"db0"}},
|
||||||
|
},
|
||||||
|
&Query{
|
||||||
|
name: "make sure DROP SERIES with field in WHERE didn't delete data",
|
||||||
|
command: `SHOW SERIES`,
|
||||||
|
exp: `{"results":[{"series":[{"name":"b","columns":["_key","host","region"],"values":[["b,host=serverA,region=uswest","serverA","uswest"]]},{"name":"c","columns":["_key","host","region"],"values":[["c,host=serverA,region=uswest","serverA","uswest"]]}]}]}`,
|
||||||
|
params: url.Values{"db": []string{"db0"}},
|
||||||
|
},
|
||||||
|
&Query{
|
||||||
|
name: "Drop series with WHERE time should error",
|
||||||
|
command: `DROP SERIES FROM c WHERE time > now() - 1d`,
|
||||||
|
exp: `{"results":[{"error":"DROP SERIES doesn't support time in WHERE clause"}]}`,
|
||||||
|
params: url.Values{"db": []string{"db0"}},
|
||||||
|
},
|
||||||
}...)
|
}...)
|
||||||
|
|
||||||
for i, query := range test.queries {
|
for i, query := range test.queries {
|
||||||
|
@ -1258,6 +1391,9 @@ func TestServer_Query_Tags(t *testing.T) {
|
||||||
|
|
||||||
fmt.Sprintf("cpu3,company=acme01 value=100 %d", mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()),
|
fmt.Sprintf("cpu3,company=acme01 value=100 %d", mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()),
|
||||||
fmt.Sprintf("cpu3 value=200 %d", mustParseTime(time.RFC3339Nano, "2012-02-28T01:03:38.703820946Z").UnixNano()),
|
fmt.Sprintf("cpu3 value=200 %d", mustParseTime(time.RFC3339Nano, "2012-02-28T01:03:38.703820946Z").UnixNano()),
|
||||||
|
|
||||||
|
fmt.Sprintf("status_code,url=http://www.example.com value=404 %d", mustParseTime(time.RFC3339Nano, "2015-07-22T08:13:54.929026672Z").UnixNano()),
|
||||||
|
fmt.Sprintf("status_code,url=https://influxdb.com value=418 %d", mustParseTime(time.RFC3339Nano, "2015-07-22T09:52:24.914395083Z").UnixNano()),
|
||||||
}
|
}
|
||||||
|
|
||||||
test := NewTest("db0", "rp0")
|
test := NewTest("db0", "rp0")
|
||||||
|
@ -1379,6 +1515,16 @@ func TestServer_Query_Tags(t *testing.T) {
|
||||||
command: `SELECT value FROM db0.rp0.cpu3 WHERE company !~ /acme[23]/`,
|
command: `SELECT value FROM db0.rp0.cpu3 WHERE company !~ /acme[23]/`,
|
||||||
exp: `{"results":[{"series":[{"name":"cpu3","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",200],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`,
|
exp: `{"results":[{"series":[{"name":"cpu3","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",200],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`,
|
||||||
},
|
},
|
||||||
|
&Query{
|
||||||
|
name: "single field (regex tag match with escaping)",
|
||||||
|
command: `SELECT value FROM db0.rp0.status_code WHERE url !~ /https\:\/\/influxdb\.com/`,
|
||||||
|
exp: `{"results":[{"series":[{"name":"status_code","columns":["time","value"],"values":[["2015-07-22T08:13:54.929026672Z",404]]}]}]}`,
|
||||||
|
},
|
||||||
|
&Query{
|
||||||
|
name: "single field (regex tag match with escaping)",
|
||||||
|
command: `SELECT value FROM db0.rp0.status_code WHERE url =~ /https\:\/\/influxdb\.com/`,
|
||||||
|
exp: `{"results":[{"series":[{"name":"status_code","columns":["time","value"],"values":[["2015-07-22T09:52:24.914395083Z",418]]}]}]}`,
|
||||||
|
},
|
||||||
}...)
|
}...)
|
||||||
|
|
||||||
if err := test.init(s); err != nil {
|
if err := test.init(s); err != nil {
|
||||||
|
@ -1997,6 +2143,12 @@ func TestServer_Query_AggregatesCommon(t *testing.T) {
|
||||||
command: `SELECT FIRST(value) FROM intmany`,
|
command: `SELECT FIRST(value) FROM intmany`,
|
||||||
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`,
|
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`,
|
||||||
},
|
},
|
||||||
|
&Query{
|
||||||
|
name: "first - int - epoch ms",
|
||||||
|
params: url.Values{"db": []string{"db0"}, "epoch": []string{"ms"}},
|
||||||
|
command: `SELECT FIRST(value) FROM intmany`,
|
||||||
|
exp: fmt.Sprintf(`{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[[%d,2]]}]}]}`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()/int64(time.Millisecond)),
|
||||||
|
},
|
||||||
&Query{
|
&Query{
|
||||||
name: "last - int",
|
name: "last - int",
|
||||||
params: url.Values{"db": []string{"db0"}},
|
params: url.Values{"db": []string{"db0"}},
|
||||||
|
@@ -2418,6 +2570,17 @@ func TestServer_Query_AggregateSelectors(t *testing.T) {
 			command: `SELECT max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,
 			exp:     `{"results":[{"series":[{"name":"network","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",90]]}]}]}`,
 		},
+		&Query{
+			name:    "max - baseline 30s - epoch ms",
+			params:  url.Values{"db": []string{"db0"}, "epoch": []string{"ms"}},
+			command: `SELECT max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,
+			exp: fmt.Sprintf(
+				`{"results":[{"series":[{"name":"network","columns":["time","max"],"values":[[%d,40],[%d,50],[%d,90]]}]}]}`,
+				mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()/int64(time.Millisecond),
+				mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()/int64(time.Millisecond),
+				mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()/int64(time.Millisecond),
+			),
+		},
 		&Query{
 			name:    "max - tx",
 			params:  url.Values{"db": []string{"db0"}},
@@ -2460,6 +2623,12 @@ func TestServer_Query_AggregateSelectors(t *testing.T) {
 			command: `SELECT time, tx, min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,
 			exp:     `{"results":[{"series":[{"name":"network","columns":["time","tx","min"],"values":[["2000-01-01T00:00:00Z",20,10],["2000-01-01T00:00:30Z",60,40],["2000-01-01T00:01:20Z",4,5]]}]}]}`,
 		},
+		&Query{
+			name:    "max,min - baseline 30s",
+			params:  url.Values{"db": []string{"db0"}},
+			command: `SELECT max(rx), min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,
+			exp:     `{"results":[{"series":[{"name":"network","columns":["time","max","min"],"values":[["2000-01-01T00:00:00Z",40,10],["2000-01-01T00:00:30Z",50,40],["2000-01-01T00:01:00Z",90,5]]}]}]}`,
+		},
 		&Query{
 			name:    "first - baseline 30s",
 			params:  url.Values{"db": []string{"db0"}},
@@ -2742,6 +2911,17 @@ func TestServer_Query_TopInt(t *testing.T) {
 			command: `SELECT time, TOP(value, 1) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`,
 			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`,
 		},
+		&Query{
+			name:    "top - cpu - time specified - hourly - epoch ms",
+			params:  url.Values{"db": []string{"db0"}, "epoch": []string{"ms"}},
+			command: `SELECT time, TOP(value, 1) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`,
+			exp: fmt.Sprintf(
+				`{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[[%d,4],[%d,7],[%d,9]]}]}]}`,
+				mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()/int64(time.Millisecond),
+				mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:10Z").UnixNano()/int64(time.Millisecond),
+				mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:10Z").UnixNano()/int64(time.Millisecond),
+			),
+		},
 		&Query{
 			name:    "top - cpu - time specified (not first) - hourly",
 			params:  url.Values{"db": []string{"db0"}},
@@ -3280,7 +3460,7 @@ func TestServer_Query_WildcardExpansion(t *testing.T) {
 			exp:     `{"results":[{"series":[{"name":"wildcard","columns":["time","c","h","region","value"],"values":[["2000-01-01T00:00:00Z",80,"A","us-east",10],["2000-01-01T00:00:10Z",90,"B","us-east",20],["2000-01-01T00:00:20Z",70,"B","us-west",30],["2000-01-01T00:00:30Z",60,"A","us-east",40]]}]}]}`,
 		},
 		&Query{
-			name:    "duplicate tag and field name, always favor field over tag",
+			name:    "duplicate tag and field key, always favor field over tag",
 			command: `SELECT * FROM dupnames`,
 			params:  url.Values{"db": []string{"db0"}},
 			exp:     `{"results":[{"series":[{"name":"dupnames","columns":["time","day","region","value"],"values":[["2000-01-01T00:00:00Z",3,"us-east",10],["2000-01-01T00:00:10Z",2,"us-east",20],["2000-01-01T00:00:20Z",1,"us-west",30]]}]}]}`,
@@ -4127,6 +4307,18 @@ func TestServer_Query_ShowSeries(t *testing.T) {
 			exp:     `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01,region=useast","server01","useast"],["cpu,host=server02,region=useast","server02","useast"]]}]}]}`,
 			params:  url.Values{"db": []string{"db0"}},
 		},
+		&Query{
+			name:    `show series with WHERE time should fail`,
+			command: "SHOW SERIES WHERE time > now() - 1h",
+			exp:     `{"results":[{"error":"SHOW SERIES doesn't support time in WHERE clause"}]}`,
+			params:  url.Values{"db": []string{"db0"}},
+		},
+		&Query{
+			name:    `show series with WHERE field should fail`,
+			command: "SHOW SERIES WHERE value > 10.0",
+			exp:     `{"results":[{"error":"SHOW SERIES doesn't support fields in WHERE clause"}]}`,
+			params:  url.Values{"db": []string{"db0"}},
+		},
 	}...)

 	for i, query := range test.queries {
@@ -4191,6 +4383,12 @@ func TestServer_Query_ShowMeasurements(t *testing.T) {
 			exp:     `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"]]}]}]}`,
 			params:  url.Values{"db": []string{"db0"}},
 		},
+		&Query{
+			name:    `show measurements with time in WHERE clauses errors`,
+			command: `SHOW MEASUREMENTS WHERE time > now() - 1h`,
+			exp:     `{"results":[{"error":"SHOW MEASUREMENTS doesn't support time in WHERE clause"}]}`,
+			params:  url.Values{"db": []string{"db0"}},
+		},
 	}...)

 	for i, query := range test.queries {
@@ -4261,6 +4459,12 @@ func TestServer_Query_ShowTagKeys(t *testing.T) {
 			exp:     `{"results":[{}]}`,
 			params:  url.Values{"db": []string{"db0"}},
 		},
+		&Query{
+			name:    "show tag keys with time in WHERE clause errors",
+			command: "SHOW TAG KEYS FROM cpu WHERE time > now() - 1h",
+			exp:     `{"results":[{"error":"SHOW TAG KEYS doesn't support time in WHERE clause"}]}`,
+			params:  url.Values{"db": []string{"db0"}},
+		},
 		&Query{
 			name:    "show tag values with key",
 			command: "SHOW TAG VALUES WITH KEY = host",
@@ -4297,6 +4501,12 @@ func TestServer_Query_ShowTagKeys(t *testing.T) {
 			exp:     `{"results":[{"series":[{"name":"hostTagValues","columns":["host"],"values":[["server01"],["server02"],["server03"]]}]}]}`,
 			params:  url.Values{"db": []string{"db0"}},
 		},
+		&Query{
+			name:    `show tag values with key and time in WHERE clause should error`,
+			command: `SHOW TAG VALUES WITH KEY = host WHERE time > now() - 1h`,
+			exp:     `{"results":[{"error":"SHOW TAG VALUES doesn't support time in WHERE clause"}]}`,
+			params:  url.Values{"db": []string{"db0"}},
+		},
 	}...)

 	for i, query := range test.queries {
@@ -4780,3 +4990,57 @@ func TestServer_Query_FieldWithMultiplePeriodsMeasurementPrefixMatch(t *testing.
 		}
 	}
 }
+
+func TestServer_Query_IntoTarget(t *testing.T) {
+	t.Parallel()
+	s := OpenServer(NewConfig(), "")
+	defer s.Close()
+
+	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil {
+		t.Fatal(err)
+	}
+	if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
+		t.Fatal(err)
+	}
+
+	writes := []string{
+		fmt.Sprintf(`foo value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
+		fmt.Sprintf(`foo value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()),
+		fmt.Sprintf(`foo value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()),
+		fmt.Sprintf(`foo value=4 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()),
+	}
+
+	test := NewTest("db0", "rp0")
+	test.write = strings.Join(writes, "\n")
+
+	test.addQueries([]*Query{
+		&Query{
+			name:    "into",
+			params:  url.Values{"db": []string{"db0"}},
+			command: `SELECT value AS something INTO baz FROM foo`,
+			exp:     `{"results":[{"series":[{"name":"result","columns":["time","written"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`,
+		},
+		&Query{
+			name:    "confirm results",
+			params:  url.Values{"db": []string{"db0"}},
+			command: `SELECT something FROM baz`,
+			exp:     `{"results":[{"series":[{"name":"baz","columns":["time","something"],"values":[["2000-01-01T00:00:00Z",1],["2000-01-01T00:00:10Z",2],["2000-01-01T00:00:20Z",3],["2000-01-01T00:00:30Z",4]]}]}]}`,
+		},
+	}...)
+
+	if err := test.init(s); err != nil {
+		t.Fatalf("test init failed: %s", err)
+	}
+
+	for _, query := range test.queries {
+		if query.skip {
+			t.Logf("SKIP:: %s", query.name)
+			continue
+		}
+		if err := query.Execute(s); err != nil {
+			t.Error(query.Error(err))
+		} else if !query.success() {
+			t.Error(query.failureMessage())
+		}
+	}
+}
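
The INTO test above drives the query through the server's HTTP API; the same statement can be issued directly against the /query endpoint. A minimal sketch, assuming a local server on the default port and the db0/foo/baz names from the test:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	v := url.Values{}
	v.Set("db", "db0")
	v.Set("q", `SELECT value AS something INTO baz FROM foo`)

	resp, err := http.Get("http://localhost:8086/query?" + v.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// On success the response carries a "result" series with a "written"
	// count, as asserted by the test above.
	fmt.Println(string(body))
}
```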
@@ -1,10 +1,8 @@
 package influxdb

 import (
-	"encoding/json"
 	"errors"
 	"fmt"
-	"runtime"
 	"strings"
 )

@@ -26,18 +24,6 @@ func ErrRetentionPolicyNotFound(name string) error {
 	return fmt.Errorf("retention policy not found: %s", name)
 }

-func errMeasurementNotFound(name string) error { return fmt.Errorf("measurement not found: %s", name) }
-
-func errorf(format string, a ...interface{}) (err error) {
-	if _, file, line, ok := runtime.Caller(2); ok {
-		a = append(a, file, line)
-		err = fmt.Errorf(format+" (%s:%d)", a...)
-	} else {
-		err = fmt.Errorf(format, a...)
-	}
-	return
-}
-
 // IsClientError indicates whether an error is a known client error.
 func IsClientError(err error) bool {
 	if err == nil {
@@ -57,30 +43,3 @@ func IsClientError(err error) bool {

 	return false
 }
-
-// mustMarshal encodes a value to JSON.
-// This will panic if an error occurs. This should only be used internally when
-// an invalid marshal will cause corruption and a panic is appropriate.
-func mustMarshalJSON(v interface{}) []byte {
-	b, err := json.Marshal(v)
-	if err != nil {
-		panic("marshal: " + err.Error())
-	}
-	return b
-}
-
-// mustUnmarshalJSON decodes a value from JSON.
-// This will panic if an error occurs. This should only be used internally when
-// an invalid unmarshal will cause corruption and a panic is appropriate.
-func mustUnmarshalJSON(b []byte, v interface{}) {
-	if err := json.Unmarshal(b, v); err != nil {
-		panic("unmarshal: " + err.Error())
-	}
-}
-
-// assert will panic with a given formatted message if the given condition is false.
-func assert(condition bool, msg string, v ...interface{}) {
-	if !condition {
-		panic(fmt.Sprintf("assert failed: "+msg, v...))
-	}
-}
@@ -8,6 +8,10 @@
 # Change this option to true to disable reporting.
 reporting-disabled = false

+# Enterprise registration control
+# enterprise-url = "https://enterprise.influxdata.com" # The Enterprise server URL
+# enterprise-token = "" # Registration token for Enterprise server
+
 ###
 ### [meta]
 ###
@@ -235,7 +239,7 @@ reporting-disabled = false
 [[udp]]
   enabled = false
   # bind-address = ""
-  # database = ""
+  # database = "udp"
   # retention-policy = ""

 # These next lines control how batching works. You should have this enabled
@@ -54,7 +54,7 @@ digit = "0" … "9" .

 ## Identifiers

-Identifiers are tokens which refer to database names, retention policy names, user names, measurement names, tag keys, and field names.
+Identifiers are tokens which refer to database names, retention policy names, user names, measurement names, tag keys, and field keys.

 The rules:

@@ -82,16 +82,17 @@ _cpu_stats
 ## Keywords

 ```
-ALL          ALTER        AS           ASC          BEGIN        BY
-CREATE       CONTINUOUS   DATABASE     DATABASES    DEFAULT      DELETE
-DESC         DROP         DURATION     END          EXISTS       EXPLAIN
-FIELD        FROM         GRANT        GROUP        IF           IN
-INNER        INSERT       INTO         KEY          KEYS         LIMIT
-SHOW         MEASUREMENT  MEASUREMENTS NOT          OFFSET       ON
-ORDER        PASSWORD     POLICY       POLICIES     PRIVILEGES   QUERIES
-QUERY        READ         REPLICATION  RETENTION    REVOKE       SELECT
-SERIES       SLIMIT       SOFFSET      TAG          TO           USER
-USERS        VALUES       WHERE        WITH         WRITE
+ALL           ALTER         ANY           AS            ASC           BEGIN
+BY            CREATE        CONTINUOUS    DATABASE      DATABASES     DEFAULT
+DELETE        DESC          DESTINATIONS  DROP          DURATION      END
+EXISTS        EXPLAIN       FIELD         FROM          GRANT         GROUP
+IF            IN            INNER         INSERT        INTO          KEY
+KEYS          LIMIT         SHOW          MEASUREMENT   MEASUREMENTS  NOT
+OFFSET        ON            ORDER         PASSWORD      POLICY        POLICIES
+PRIVILEGES    QUERIES       QUERY         READ          REPLICATION   RETENTION
+REVOKE        SELECT        SERIES        SLIMIT        SOFFSET       SUBSCRIPTION
+SUBSCRIPTIONS TAG           TO            USER          USERS         VALUES
+WHERE         WITH          WRITE
 ```

 ## Literals

@@ -174,12 +175,14 @@ statement = alter_retention_policy_stmt |
             create_database_stmt |
             create_retention_policy_stmt |
             create_user_stmt |
+            create_subscription_stmt |
             delete_stmt |
             drop_continuous_query_stmt |
             drop_database_stmt |
             drop_measurement_stmt |
             drop_retention_policy_stmt |
             drop_series_stmt |
+            drop_subscription_stmt |
             drop_user_stmt |
             grant_stmt |
             show_continuous_queries_stmt |
@@ -189,6 +192,7 @@ statement = alter_retention_policy_stmt |
             show_retention_policies |
             show_series_stmt |
             show_shards_stmt |
+            show_subscriptions_stmt |
             show_tag_keys_stmt |
             show_tag_values_stmt |
             show_users_stmt |
@@ -292,6 +296,22 @@ CREATE RETENTION POLICY "10m.events" ON somedb DURATION 10m REPLICATION 2;
 CREATE RETENTION POLICY "10m.events" ON somedb DURATION 10m REPLICATION 2 DEFAULT;
 ```

+### CREATE SUBSCRIPTION
+
+```
+create_subscription_stmt = "CREATE SUBSCRIPTION" subscription_name "ON" db_name "." retention_policy "DESTINATIONS" ("ANY"|"ALL") host { "," host } .
+```
+
+#### Examples:
+
+```sql
+-- Create a SUBSCRIPTION on database 'mydb' and retention policy 'default' that sends data to 'example.com:9090' via UDP.
+CREATE SUBSCRIPTION sub0 ON "mydb"."default" DESTINATIONS ALL 'udp://example.com:9090';
+
+-- Create a SUBSCRIPTION on database 'mydb' and retention policy 'default' that round-robins the data to 'h1.example.com:9090' and 'h2.example.com:9090'.
+CREATE SUBSCRIPTION sub0 ON "mydb"."default" DESTINATIONS ANY 'udp://h1.example.com:9090', 'udp://h2.example.com:9090';
+```
+
 ### CREATE USER

 ```
@@ -382,6 +402,19 @@ drop_series_stmt = "DROP SERIES" [ from_clause ] [ where_clause ]

 ```

+### DROP SUBSCRIPTION
+
+```
+drop_subscription_stmt = "DROP SUBSCRIPTION" subscription_name "ON" db_name "." retention_policy .
+```
+
+#### Example:
+
+```sql
+DROP SUBSCRIPTION sub0 ON "mydb"."default";
+```
+
 ### DROP USER

 ```
@@ -502,6 +535,18 @@ show_shards_stmt = "SHOW SHARDS" .
 SHOW SHARDS;
 ```

+### SHOW SUBSCRIPTIONS
+
+```
+show_subscriptions_stmt = "SHOW SUBSCRIPTIONS" .
+```
+
+#### Example:
+
+```sql
+SHOW SUBSCRIPTIONS;
+```
+
 ### SHOW TAG KEYS

 ```
@@ -652,7 +697,7 @@ privilege = "ALL" [ "PRIVILEGES" ] | "READ" | "WRITE" .

 series_id = int_lit .

-sort_field = field_name [ ASC | DESC ] .
+sort_field = field_key [ ASC | DESC ] .

 sort_fields = sort_field { "," sort_field } .
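
Per the spec examples above, subscription destinations are udp:// endpoints, so a subscriber can be a plain datagram listener. A hypothetical receiver sketch, assuming the forked writes arrive as UDP datagrams (the port matches the 'udp://example.com:9090' destination in the examples):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	addr, err := net.ResolveUDPAddr("udp", ":9090")
	if err != nil {
		panic(err)
	}
	conn, err := net.ListenUDP("udp", addr)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	buf := make([]byte, 65536)
	for {
		n, _, err := conn.ReadFromUDP(buf)
		if err != nil {
			panic(err)
		}
		// Assumption: each datagram carries the forwarded points.
		fmt.Print(string(buf[:n]))
	}
}
```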
@@ -80,10 +80,12 @@ type Node interface {
 func (*Query) node()     {}
 func (Statements) node() {}

+func (*AlterDatabaseRenameStatement) node()   {}
 func (*AlterRetentionPolicyStatement) node()  {}
 func (*CreateContinuousQueryStatement) node() {}
 func (*CreateDatabaseStatement) node()        {}
 func (*CreateRetentionPolicyStatement) node() {}
+func (*CreateSubscriptionStatement) node()    {}
 func (*CreateUserStatement) node()            {}
 func (*Distinct) node()                       {}
 func (*DeleteStatement) node()                {}
@@ -93,6 +95,7 @@ func (*DropMeasurementStatement) node() {}
 func (*DropRetentionPolicyStatement) node() {}
 func (*DropSeriesStatement) node()          {}
 func (*DropServerStatement) node()          {}
+func (*DropSubscriptionStatement) node()    {}
 func (*DropUserStatement) node()            {}
 func (*GrantStatement) node()               {}
 func (*GrantAdminStatement) node()          {}
@@ -110,6 +113,7 @@ func (*ShowMeasurementsStatement) node() {}
 func (*ShowSeriesStatement) node()        {}
 func (*ShowShardsStatement) node()        {}
 func (*ShowStatsStatement) node()         {}
+func (*ShowSubscriptionsStatement) node() {}
 func (*ShowDiagnosticsStatement) node()   {}
 func (*ShowTagKeysStatement) node()       {}
 func (*ShowTagValuesStatement) node()     {}
@@ -188,10 +192,12 @@ type ExecutionPrivilege struct {
 // ExecutionPrivileges is a list of privileges required to execute a statement.
 type ExecutionPrivileges []ExecutionPrivilege

+func (*AlterDatabaseRenameStatement) stmt()   {}
 func (*AlterRetentionPolicyStatement) stmt()  {}
 func (*CreateContinuousQueryStatement) stmt() {}
 func (*CreateDatabaseStatement) stmt()        {}
 func (*CreateRetentionPolicyStatement) stmt() {}
+func (*CreateSubscriptionStatement) stmt()    {}
 func (*CreateUserStatement) stmt()            {}
 func (*DeleteStatement) stmt()                {}
 func (*DropContinuousQueryStatement) stmt()   {}
@@ -200,6 +206,7 @@ func (*DropMeasurementStatement) stmt() {}
 func (*DropRetentionPolicyStatement) stmt() {}
 func (*DropSeriesStatement) stmt()          {}
 func (*DropServerStatement) stmt()          {}
+func (*DropSubscriptionStatement) stmt()    {}
 func (*DropUserStatement) stmt()            {}
 func (*GrantStatement) stmt()               {}
 func (*GrantAdminStatement) stmt()          {}
@@ -213,6 +220,7 @@ func (*ShowRetentionPoliciesStatement) stmt() {}
 func (*ShowSeriesStatement) stmt()        {}
 func (*ShowShardsStatement) stmt()        {}
 func (*ShowStatsStatement) stmt()         {}
+func (*ShowSubscriptionsStatement) stmt() {}
 func (*ShowDiagnosticsStatement) stmt()   {}
 func (*ShowTagKeysStatement) stmt()       {}
 func (*ShowTagValuesStatement) stmt()     {}
@@ -502,6 +510,30 @@ func (s *GrantAdminStatement) RequiredPrivileges() ExecutionPrivileges {
 	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
 }

+// AlterDatabaseRenameStatement represents a command for renaming a database.
+type AlterDatabaseRenameStatement struct {
+	// Current name of the database
+	OldName string
+
+	// New name of the database
+	NewName string
+}
+
+// String returns a string representation of the rename database statement.
+func (s *AlterDatabaseRenameStatement) String() string {
+	var buf bytes.Buffer
+	_, _ = buf.WriteString("ALTER DATABASE ")
+	_, _ = buf.WriteString(s.OldName)
+	_, _ = buf.WriteString(" RENAME")
+	_, _ = buf.WriteString(" TO ")
+	_, _ = buf.WriteString(s.NewName)
+	return buf.String()
+}
+
+// RequiredPrivileges returns the privilege required to execute an AlterDatabaseRenameStatement.
+func (s *AlterDatabaseRenameStatement) RequiredPrivileges() ExecutionPrivileges {
+	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
+}
+
 // SetPasswordUserStatement represents a command for changing user password.
 type SetPasswordUserStatement struct {
 	// Plain Password
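
A quick round-trip check of the new statement's String method (import path as vendored in this repository; database names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/influxdb/influxdb/influxql"
)

func main() {
	stmt := &influxql.AlterDatabaseRenameStatement{OldName: "db0", NewName: "db1"}
	fmt.Println(stmt.String()) // ALTER DATABASE db0 RENAME TO db1
}
```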
@@ -981,31 +1013,6 @@ func (s *SelectStatement) RequiredPrivileges() ExecutionPrivileges {
 	return ep
 }

-// OnlyTimeDimensions returns true if the statement has a where clause with only time constraints
-func (s *SelectStatement) OnlyTimeDimensions() bool {
-	return s.walkForTime(s.Condition)
-}
-
-// walkForTime is called by the OnlyTimeDimensions method to walk the where clause to determine if
-// the only things specified are based on time
-func (s *SelectStatement) walkForTime(node Node) bool {
-	switch n := node.(type) {
-	case *BinaryExpr:
-		if n.Op == AND || n.Op == OR {
-			return s.walkForTime(n.LHS) && s.walkForTime(n.RHS)
-		}
-		if ref, ok := n.LHS.(*VarRef); ok && strings.ToLower(ref.Val) == "time" {
-			return true
-		}
-		return false
-	case *ParenExpr:
-		// walk down the tree
-		return s.walkForTime(n.Expr)
-	default:
-		return false
-	}
-}
-
 // HasWildcard returns whether or not the select statement has at least 1 wildcard
 func (s *SelectStatement) HasWildcard() bool {
 	return s.HasFieldWildcard() || s.HasDimensionWildcard()
@@ -1036,26 +1043,6 @@ func (s *SelectStatement) HasDimensionWildcard() bool {
 	return false
 }

-// hasTimeDimensions returns whether or not the select statement has at least 1
-// where condition with time as the condition
-func (s *SelectStatement) hasTimeDimensions(node Node) bool {
-	switch n := node.(type) {
-	case *BinaryExpr:
-		if n.Op == AND || n.Op == OR {
-			return s.hasTimeDimensions(n.LHS) || s.hasTimeDimensions(n.RHS)
-		}
-		if ref, ok := n.LHS.(*VarRef); ok && strings.ToLower(ref.Val) == "time" {
-			return true
-		}
-		return false
-	case *ParenExpr:
-		// walk down the tree
-		return s.hasTimeDimensions(n.Expr)
-	default:
-		return false
-	}
-}
-
 func (s *SelectStatement) validate(tr targetRequirement) error {
 	if err := s.validateFields(); err != nil {
 		return err
@@ -1254,7 +1241,7 @@ func (s *SelectStatement) validateAggregates(tr targetRequirement) error {

 	// If we have an aggregate function with a group by time without a where clause, it's an invalid statement
 	if tr == targetNotRequired { // ignore create continuous query statements
-		if !s.IsRawQuery && groupByDuration > 0 && !s.hasTimeDimensions(s.Condition) {
+		if !s.IsRawQuery && groupByDuration > 0 && !HasTimeExpr(s.Condition) {
 			return fmt.Errorf("aggregate functions with GROUP BY time require a WHERE time clause")
 		}
 	}
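
The switch to HasTimeExpr keeps the old behavior visible at parse time: an aggregate grouped by time with no time term anywhere in the WHERE clause should be rejected. A sketch of both sides of the rule (vendored import path; queries are illustrative):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/influxdb/influxdb/influxql"
)

func main() {
	// No WHERE time clause: validation fails.
	_, err := influxql.NewParser(strings.NewReader(
		`SELECT max(value) FROM cpu GROUP BY time(30s)`,
	)).ParseStatement()
	fmt.Println(err) // aggregate functions with GROUP BY time require a WHERE time clause

	// A time bound satisfies HasTimeExpr and the statement parses.
	_, err = influxql.NewParser(strings.NewReader(
		`SELECT max(value) FROM cpu WHERE time > now() - 1h GROUP BY time(30s)`,
	)).ParseStatement()
	fmt.Println(err) // <nil>
}
```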
@@ -1736,7 +1723,7 @@ func (s *DeleteStatement) String() string {
 		_, _ = buf.WriteString(" WHERE ")
 		_, _ = buf.WriteString(s.Condition.String())
 	}
-	return s.String()
+	return buf.String()
 }

 // RequiredPrivileges returns the privilege required to execute a DeleteStatement.
@@ -2102,6 +2089,65 @@ func (s *ShowDiagnosticsStatement) RequiredPrivileges() ExecutionPrivileges {
 	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
 }

+// CreateSubscriptionStatement represents a command to add a subscription to the incoming data stream
+type CreateSubscriptionStatement struct {
+	Name            string
+	Database        string
+	RetentionPolicy string
+	Destinations    []string
+	Mode            string
+}
+
+// String returns a string representation of the CreateSubscriptionStatement.
+func (s *CreateSubscriptionStatement) String() string {
+	var destinations bytes.Buffer
+	for i, dest := range s.Destinations {
+		if i != 0 {
+			destinations.Write([]byte(`, `))
+		}
+		destinations.Write([]byte(`'`))
+		destinations.Write([]byte(dest))
+		destinations.Write([]byte(`'`))
+	}
+	return fmt.Sprintf(`CREATE SUBSCRIPTION "%s" ON "%s"."%s" DESTINATIONS %s %s`, s.Name, s.Database, s.RetentionPolicy, s.Mode, string(destinations.Bytes()))
+}
+
+// RequiredPrivileges returns the privilege required to execute a CreateSubscriptionStatement
+func (s *CreateSubscriptionStatement) RequiredPrivileges() ExecutionPrivileges {
+	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
+}
+
+// DropSubscriptionStatement represents a command to drop a subscription to the incoming data stream.
+type DropSubscriptionStatement struct {
+	Name            string
+	Database        string
+	RetentionPolicy string
+}
+
+// String returns a string representation of the DropSubscriptionStatement.
+func (s *DropSubscriptionStatement) String() string {
+	return fmt.Sprintf(`DROP SUBSCRIPTION "%s" ON "%s"."%s"`, s.Name, s.Database, s.RetentionPolicy)
+}
+
+// RequiredPrivileges returns the privilege required to execute a DropSubscriptionStatement
+func (s *DropSubscriptionStatement) RequiredPrivileges() ExecutionPrivileges {
+	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
+}
+
+// ShowSubscriptionsStatement represents a command to show a list of subscriptions.
+type ShowSubscriptionsStatement struct{}
+
+// String returns a string representation of the ShowSubscriptionsStatement.
+func (s *ShowSubscriptionsStatement) String() string {
+	return "SHOW SUBSCRIPTIONS"
+}
+
+// RequiredPrivileges returns the privilege required to execute a ShowSubscriptionsStatement
+func (s *ShowSubscriptionsStatement) RequiredPrivileges() ExecutionPrivileges {
+	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
+}
+
 // ShowTagKeysStatement represents a command for listing tag keys.
 type ShowTagKeysStatement struct {
 	// Data sources that fields are extracted from.
@@ -2635,7 +2681,7 @@ type RegexLiteral struct {
 // String returns a string representation of the literal.
 func (r *RegexLiteral) String() string {
 	if r.Val != nil {
-		return fmt.Sprintf("/%s/", r.Val.String())
+		return fmt.Sprintf("/%s/", strings.Replace(r.Val.String(), `/`, `\/`, -1))
 	}
 	return ""
 }
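
The escaping added above matters for round-tripping: a pattern containing `/` would otherwise serialize into a /.../  literal that the lexer cuts short at the first unescaped slash. The same transformation in isolation (standard library only; the pattern is a made-up example):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	re := regexp.MustCompile(`https://influxdb\.com`)

	// Naive form: the embedded '/' ends the regex literal early on re-parse.
	fmt.Printf("/%s/\n", re.String()) // /https://influxdb\.com/

	// Escaped form, mirroring RegexLiteral.String above.
	escaped := strings.Replace(re.String(), `/`, `\/`, -1)
	fmt.Printf("/%s/\n", escaped) // /https:\/\/influxdb\.com/
}
```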
@@ -2698,6 +2744,47 @@ func CloneExpr(expr Expr) Expr {
 	panic("unreachable")
 }

+// HasTimeExpr returns true if the expression has a time term.
+func HasTimeExpr(expr Expr) bool {
+	switch n := expr.(type) {
+	case *BinaryExpr:
+		if n.Op == AND || n.Op == OR {
+			return HasTimeExpr(n.LHS) || HasTimeExpr(n.RHS)
+		}
+		if ref, ok := n.LHS.(*VarRef); ok && strings.ToLower(ref.Val) == "time" {
+			return true
+		}
+		return false
+	case *ParenExpr:
+		// walk down the tree
+		return HasTimeExpr(n.Expr)
+	default:
+		return false
+	}
+}
+
+// OnlyTimeExpr returns true if the expression only has time constraints.
+func OnlyTimeExpr(expr Expr) bool {
+	if expr == nil {
+		return false
+	}
+	switch n := expr.(type) {
+	case *BinaryExpr:
+		if n.Op == AND || n.Op == OR {
+			return OnlyTimeExpr(n.LHS) && OnlyTimeExpr(n.RHS)
+		}
+		if ref, ok := n.LHS.(*VarRef); ok && strings.ToLower(ref.Val) == "time" {
+			return true
+		}
+		return false
+	case *ParenExpr:
+		// walk down the tree
+		return OnlyTimeExpr(n.Expr)
+	default:
+		return false
+	}
+}
+
 // TimeRange returns the minimum and maximum times specified by an expression.
 // Returns zero times if there is no bound.
 func TimeRange(expr Expr) (min, max time.Time) {
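
The two helpers walk the same tree with opposite combinators: HasTimeExpr ORs across AND/OR nodes, so one time term anywhere suffices, while OnlyTimeExpr ANDs, so every leaf must reference time. A sketch of the distinction (vendored import path; conditions are illustrative):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/influxdb/influxdb/influxql"
)

func check(q string) {
	stmt, err := influxql.NewParser(strings.NewReader(q)).ParseStatement()
	if err != nil {
		panic(err)
	}
	cond := stmt.(*influxql.SelectStatement).Condition
	fmt.Printf("has=%v only=%v  %s\n", influxql.HasTimeExpr(cond), influxql.OnlyTimeExpr(cond), q)
}

func main() {
	// Mixed condition: HasTimeExpr is true, OnlyTimeExpr is false.
	check(`SELECT value FROM cpu WHERE time > now() - 1h AND host = 'a'`)
	// Pure time bound: both are true.
	check(`SELECT value FROM cpu WHERE time > now() - 1h`)
}
```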
@@ -521,7 +521,7 @@ func TestTimeRange(t *testing.T) {
 }

 // Ensure that we see if a where clause has only time limitations
-func TestSelectStatement_OnlyTimeDimensions(t *testing.T) {
+func TestOnlyTimeExpr(t *testing.T) {
 	var tests = []struct {
 		stmt string
 		exp  bool
@@ -554,7 +554,7 @@ func TestOnlyTimeExpr(t *testing.T) {
 		if err != nil {
 			t.Fatalf("invalid statement: %q: %s", tt.stmt, err)
 		}
-		if stmt.(*influxql.SelectStatement).OnlyTimeDimensions() != tt.exp {
+		if influxql.OnlyTimeExpr(stmt.(*influxql.SelectStatement).Condition) != tt.exp {
 			t.Fatalf("%d. expected statement to return only time dimension to be %t: %s", i, tt.exp, tt.stmt)
 		}
 	}
@@ -146,6 +146,8 @@ func (p *Parser) parseShowStatement() (Statement, error) {
 		return nil, newParseError(tokstr(tok, lit), []string{"KEYS", "VALUES"}, pos)
 	case USERS:
 		return p.parseShowUsersStatement()
+	case SUBSCRIPTIONS:
+		return p.parseShowSubscriptionsStatement()
 	}

 	showQueryKeywords := []string{
@@ -162,6 +164,7 @@ func (p *Parser) parseShowStatement() (Statement, error) {
 		"STATS",
 		"DIAGNOSTICS",
 		"SHARDS",
+		"SUBSCRIPTIONS",
 	}
 	sort.Strings(showQueryKeywords)
@@ -184,9 +187,11 @@ func (p *Parser) parseCreateStatement() (Statement, error) {
 			return nil, newParseError(tokstr(tok, lit), []string{"POLICY"}, pos)
 		}
 		return p.parseCreateRetentionPolicyStatement()
+	} else if tok == SUBSCRIPTION {
+		return p.parseCreateSubscriptionStatement()
 	}

-	return nil, newParseError(tokstr(tok, lit), []string{"CONTINUOUS", "DATABASE", "USER", "RETENTION"}, pos)
+	return nil, newParseError(tokstr(tok, lit), []string{"CONTINUOUS", "DATABASE", "USER", "RETENTION", "SUBSCRIPTION"}, pos)
 }

 // parseDropStatement parses a string and returns a drop statement.
@@ -210,23 +215,29 @@ func (p *Parser) parseDropStatement() (Statement, error) {
 		return p.parseDropUserStatement()
 	} else if tok == SERVER {
 		return p.parseDropServerStatement()
+	} else if tok == SUBSCRIPTION {
+		return p.parseDropSubscriptionStatement()
 	}

-	return nil, newParseError(tokstr(tok, lit), []string{"SERIES", "CONTINUOUS", "MEASUREMENT"}, pos)
+	return nil, newParseError(tokstr(tok, lit), []string{"SERIES", "CONTINUOUS", "MEASUREMENT", "SERVER", "SUBSCRIPTION"}, pos)
 }

 // parseAlterStatement parses a string and returns an alter statement.
 // This function assumes the ALTER token has already been consumed.
 func (p *Parser) parseAlterStatement() (Statement, error) {
 	tok, pos, lit := p.scanIgnoreWhitespace()
-	if tok == RETENTION {
+
+	switch tok {
+	case RETENTION:
 		if tok, pos, lit = p.scanIgnoreWhitespace(); tok != POLICY {
 			return nil, newParseError(tokstr(tok, lit), []string{"POLICY"}, pos)
 		}
 		return p.parseAlterRetentionPolicyStatement()
+	case DATABASE:
+		return p.parseAlterDatabaseRenameStatement()
 	}

-	return nil, newParseError(tokstr(tok, lit), []string{"RETENTION"}, pos)
+	return nil, newParseError(tokstr(tok, lit), []string{"RETENTION", "DATABASE"}, pos)
 }

 // parseSetPasswordUserStatement parses a string and returns a set statement.
@@ -261,6 +272,61 @@ func (p *Parser) parseSetPasswordUserStatement() (*SetPasswordUserStatement, err
 	return stmt, nil
 }

+// parseCreateSubscriptionStatement parses a string and returns a CreateSubscriptionStatement.
+// This function assumes the "CREATE SUBSCRIPTION" tokens have already been consumed.
+func (p *Parser) parseCreateSubscriptionStatement() (*CreateSubscriptionStatement, error) {
+	stmt := &CreateSubscriptionStatement{}
+
+	// Read the id of the subscription to create.
+	ident, err := p.parseIdent()
+	if err != nil {
+		return nil, err
+	}
+	stmt.Name = ident
+
+	// Expect an "ON" keyword.
+	if tok, pos, lit := p.scanIgnoreWhitespace(); tok != ON {
+		return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos)
+	}
+
+	// Read the name of the database.
+	if ident, err = p.parseIdent(); err != nil {
+		return nil, err
+	}
+	stmt.Database = ident
+
+	if tok, pos, lit := p.scan(); tok != DOT {
+		return nil, newParseError(tokstr(tok, lit), []string{"."}, pos)
+	}
+
+	// Read the name of the retention policy.
+	if ident, err = p.parseIdent(); err != nil {
+		return nil, err
+	}
+	stmt.RetentionPolicy = ident
+
+	// Expect a "DESTINATIONS" keyword.
+	if tok, pos, lit := p.scanIgnoreWhitespace(); tok != DESTINATIONS {
+		return nil, newParseError(tokstr(tok, lit), []string{"DESTINATIONS"}, pos)
+	}
+
+	// Expect one of the "ANY" or "ALL" keywords.
+	if tok, pos, lit := p.scanIgnoreWhitespace(); tok == ALL || tok == ANY {
+		stmt.Mode = tokens[tok]
+	} else {
+		return nil, newParseError(tokstr(tok, lit), []string{"ALL", "ANY"}, pos)
+	}
+
+	// Read list of destinations.
+	var destinations []string
+	if destinations, err = p.parseStringList(); err != nil {
+		return nil, err
+	}
+	stmt.Destinations = destinations
+
+	return stmt, nil
+}
+
 // parseCreateRetentionPolicyStatement parses a string and returns a create retention policy statement.
 // This function assumes the CREATE RETENTION POLICY tokens have already been consumed.
 func (p *Parser) parseCreateRetentionPolicyStatement() (*CreateRetentionPolicyStatement, error) {
@@ -544,6 +610,30 @@ func (p *Parser) parseString() (string, error) {
 	return lit, nil
 }

+// parseStringList parses a comma-delimited list of strings.
+func (p *Parser) parseStringList() ([]string, error) {
+	// Parse first (required) string.
+	str, err := p.parseString()
+	if err != nil {
+		return nil, err
+	}
+	strs := []string{str}
+
+	// Parse remaining (optional) strings.
+	for {
+		if tok, _, _ := p.scanIgnoreWhitespace(); tok != COMMA {
+			p.unscan()
+			return strs, nil
+		}
+
+		if str, err = p.parseString(); err != nil {
+			return nil, err
+		}
+
+		strs = append(strs, str)
+	}
+}
+
 // parseRevokeStatement parses a string and returns a revoke statement.
 // This function assumes the REVOKE token has already been consumed.
 func (p *Parser) parseRevokeStatement() (Statement, error) {
@@ -1102,6 +1192,13 @@ func (p *Parser) parseShowUsersStatement() (*ShowUsersStatement, error) {
 	return &ShowUsersStatement{}, nil
 }

+// parseShowSubscriptionsStatement parses a string and returns a ShowSubscriptionsStatement.
+// This function assumes the "SHOW SUBSCRIPTIONS" tokens have been consumed.
+func (p *Parser) parseShowSubscriptionsStatement() (*ShowSubscriptionsStatement, error) {
+	stmt := &ShowSubscriptionsStatement{}
+	return stmt, nil
+}
+
 // parseShowFieldKeysStatement parses a string and returns a ShowFieldKeysStatement.
 // This function assumes the "SHOW FIELD KEYS" tokens have already been consumed.
 func (p *Parser) parseShowFieldKeysStatement() (*ShowFieldKeysStatement, error) {
@@ -1352,6 +1449,69 @@ func (p *Parser) parseDropDatabaseStatement() (*DropDatabaseStatement, error) {
 	return stmt, nil
 }

+// parseAlterDatabaseRenameStatement parses a string and returns an AlterDatabaseRenameStatement.
+// This function assumes the "ALTER DATABASE" tokens have already been consumed.
+func (p *Parser) parseAlterDatabaseRenameStatement() (*AlterDatabaseRenameStatement, error) {
+	stmt := &AlterDatabaseRenameStatement{}
+
+	// Parse the name of the database to be renamed.
+	lit, err := p.parseIdent()
+	if err != nil {
+		return nil, err
+	}
+	stmt.OldName = lit
+
+	// Parse required RENAME TO tokens.
+	if err := p.parseTokens([]Token{RENAME, TO}); err != nil {
+		return nil, err
+	}
+
+	// Parse the new name of the database.
+	lit, err = p.parseIdent()
+	if err != nil {
+		return nil, err
+	}
+	stmt.NewName = lit
+
+	return stmt, nil
+}
+
+// parseDropSubscriptionStatement parses a string and returns a DropSubscriptionStatement.
+// This function assumes the "DROP SUBSCRIPTION" tokens have already been consumed.
+func (p *Parser) parseDropSubscriptionStatement() (*DropSubscriptionStatement, error) {
+	stmt := &DropSubscriptionStatement{}
+
+	// Read the id of the subscription to drop.
+	ident, err := p.parseIdent()
+	if err != nil {
+		return nil, err
+	}
+	stmt.Name = ident
+
+	// Expect an "ON" keyword.
+	if tok, pos, lit := p.scanIgnoreWhitespace(); tok != ON {
+		return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos)
+	}
+
+	// Read the name of the database.
+	if ident, err = p.parseIdent(); err != nil {
+		return nil, err
+	}
+	stmt.Database = ident
+
+	if tok, pos, lit := p.scan(); tok != DOT {
+		return nil, newParseError(tokstr(tok, lit), []string{"."}, pos)
+	}
+
+	// Read the name of the retention policy.
+	if ident, err = p.parseIdent(); err != nil {
+		return nil, err
+	}
+	stmt.RetentionPolicy = ident
+
+	return stmt, nil
+}
+
 // parseDropRetentionPolicyStatement parses a string and returns a DropRetentionPolicyStatement.
 // This function assumes the DROP RETENTION POLICY tokens have been consumed.
 func (p *Parser) parseDropRetentionPolicyStatement() (*DropRetentionPolicyStatement, error) {
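
Tying the new parser paths together, each of the added statement forms should now scan, parse, and print its canonical representation. A sketch (vendored import path; names are illustrative):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/influxdb/influxdb/influxql"
)

func main() {
	for _, q := range []string{
		`ALTER DATABASE db0 RENAME TO db1`,
		`CREATE SUBSCRIPTION sub0 ON "mydb"."default" DESTINATIONS ALL 'udp://example.com:9090'`,
		`DROP SUBSCRIPTION sub0 ON "mydb"."default"`,
		`SHOW SUBSCRIPTIONS`,
	} {
		stmt, err := influxql.NewParser(strings.NewReader(q)).ParseStatement()
		if err != nil {
			panic(err)
		}
		fmt.Printf("%T  %s\n", stmt, stmt.String())
	}
}
```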
@@ -1418,6 +1418,12 @@ func TestParser_ParseStatement(t *testing.T) {
 			stmt: newAlterRetentionPolicyStatement("default", "testdb", -1, 4, false),
 		},

+		// ALTER DATABASE RENAME
+		{
+			s:    `ALTER DATABASE db0 RENAME TO db1`,
+			stmt: newAlterDatabaseRenameStatement("db0", "db1"),
+		},
+
 		// SHOW STATS
 		{
 			s: `SHOW STATS`,
@@ -1450,6 +1456,34 @@ func TestParser_ParseStatement(t *testing.T) {
 			},
 		},

+		// CREATE SUBSCRIPTION
+		{
+			s: `CREATE SUBSCRIPTION "name" ON "db"."rp" DESTINATIONS ANY 'udp://host1:9093', 'udp://host2:9093'`,
+			stmt: &influxql.CreateSubscriptionStatement{
+				Name:            "name",
+				Database:        "db",
+				RetentionPolicy: "rp",
+				Destinations:    []string{"udp://host1:9093", "udp://host2:9093"},
+				Mode:            "ANY",
+			},
+		},
+
+		// DROP SUBSCRIPTION
+		{
+			s: `DROP SUBSCRIPTION "name" ON "db"."rp"`,
+			stmt: &influxql.DropSubscriptionStatement{
+				Name:            "name",
+				Database:        "db",
+				RetentionPolicy: "rp",
+			},
+		},
+
+		// SHOW SUBSCRIPTIONS
+		{
+			s:    `SHOW SUBSCRIPTIONS`,
+			stmt: &influxql.ShowSubscriptionsStatement{},
+		},
+
 		// Errors
 		{s: ``, err: `found EOF, expected SELECT, DELETE, SHOW, CREATE, DROP, GRANT, REVOKE, ALTER, SET at line 1, char 1`},
 		{s: `SELECT`, err: `found EOF, expected identifier, string, number, bool at line 1, char 8`},
@@ -1535,7 +1569,7 @@ func TestParser_ParseStatement(t *testing.T) {
 		{s: `SHOW RETENTION POLICIES`, err: `found EOF, expected ON at line 1, char 25`},
 		{s: `SHOW RETENTION POLICIES mydb`, err: `found mydb, expected ON at line 1, char 25`},
 		{s: `SHOW RETENTION POLICIES ON`, err: `found EOF, expected identifier at line 1, char 28`},
-		{s: `SHOW FOO`, err: `found FOO, expected CONTINUOUS, DATABASES, DIAGNOSTICS, FIELD, GRANTS, MEASUREMENTS, RETENTION, SERIES, SERVERS, SHARDS, STATS, TAG, USERS at line 1, char 6`},
+		{s: `SHOW FOO`, err: `found FOO, expected CONTINUOUS, DATABASES, DIAGNOSTICS, FIELD, GRANTS, MEASUREMENTS, RETENTION, SERIES, SERVERS, SHARDS, STATS, SUBSCRIPTIONS, TAG, USERS at line 1, char 6`},
 		{s: `SHOW STATS FOR`, err: `found EOF, expected string at line 1, char 16`},
 		{s: `SHOW DIAGNOSTICS FOR`, err: `found EOF, expected string at line 1, char 22`},
 		{s: `SHOW GRANTS`, err: `found EOF, expected FOR at line 1, char 13`},
@@ -1546,7 +1580,8 @@ func TestParser_ParseStatement(t *testing.T) {
 		{s: `DROP CONTINUOUS QUERY myquery ON`, err: `found EOF, expected identifier at line 1, char 34`},
 		{s: `CREATE CONTINUOUS`, err: `found EOF, expected QUERY at line 1, char 19`},
 		{s: `CREATE CONTINUOUS QUERY`, err: `found EOF, expected identifier at line 1, char 25`},
-		{s: `DROP FOO`, err: `found FOO, expected SERIES, CONTINUOUS, MEASUREMENT at line 1, char 6`},
+		{s: `DROP FOO`, err: `found FOO, expected SERIES, CONTINUOUS, MEASUREMENT, SERVER, SUBSCRIPTION at line 1, char 6`},
+		{s: `CREATE FOO`, err: `found FOO, expected CONTINUOUS, DATABASE, USER, RETENTION, SUBSCRIPTION at line 1, char 8`},
 		{s: `CREATE DATABASE`, err: `found EOF, expected identifier at line 1, char 17`},
 		{s: `CREATE DATABASE IF`, err: `found EOF, expected NOT at line 1, char 20`},
 		{s: `CREATE DATABASE IF NOT`, err: `found EOF, expected EXISTS at line 1, char 24`},
@@ -1557,11 +1592,24 @@ func TestParser_ParseStatement(t *testing.T) {
 {s: `DROP RETENTION POLICY "1h.cpu"`, err: `found EOF, expected ON at line 1, char 31`},
 {s: `DROP RETENTION POLICY "1h.cpu" ON`, err: `found EOF, expected identifier at line 1, char 35`},
 {s: `DROP USER`, err: `found EOF, expected identifier at line 1, char 11`},
+{s: `DROP SUBSCRIPTION`, err: `found EOF, expected identifier at line 1, char 19`},
+{s: `DROP SUBSCRIPTION "name"`, err: `found EOF, expected ON at line 1, char 25`},
+{s: `DROP SUBSCRIPTION "name" ON `, err: `found EOF, expected identifier at line 1, char 30`},
+{s: `DROP SUBSCRIPTION "name" ON "db"`, err: `found EOF, expected . at line 1, char 33`},
+{s: `DROP SUBSCRIPTION "name" ON "db".`, err: `found EOF, expected identifier at line 1, char 34`},
 {s: `CREATE USER testuser`, err: `found EOF, expected WITH at line 1, char 22`},
 {s: `CREATE USER testuser WITH`, err: `found EOF, expected PASSWORD at line 1, char 27`},
 {s: `CREATE USER testuser WITH PASSWORD`, err: `found EOF, expected string at line 1, char 36`},
 {s: `CREATE USER testuser WITH PASSWORD 'pwd' WITH`, err: `found EOF, expected ALL at line 1, char 47`},
 {s: `CREATE USER testuser WITH PASSWORD 'pwd' WITH ALL`, err: `found EOF, expected PRIVILEGES at line 1, char 51`},
+{s: `CREATE SUBSCRIPTION`, err: `found EOF, expected identifier at line 1, char 21`},
+{s: `CREATE SUBSCRIPTION "name"`, err: `found EOF, expected ON at line 1, char 27`},
+{s: `CREATE SUBSCRIPTION "name" ON `, err: `found EOF, expected identifier at line 1, char 32`},
+{s: `CREATE SUBSCRIPTION "name" ON "db"`, err: `found EOF, expected . at line 1, char 35`},
+{s: `CREATE SUBSCRIPTION "name" ON "db".`, err: `found EOF, expected identifier at line 1, char 36`},
+{s: `CREATE SUBSCRIPTION "name" ON "db"."rp"`, err: `found EOF, expected DESTINATIONS at line 1, char 40`},
+{s: `CREATE SUBSCRIPTION "name" ON "db"."rp" DESTINATIONS`, err: `found EOF, expected ALL, ANY at line 1, char 54`},
+{s: `CREATE SUBSCRIPTION "name" ON "db"."rp" DESTINATIONS ALL `, err: `found EOF, expected string at line 1, char 59`},
 {s: `GRANT`, err: `found EOF, expected READ, WRITE, ALL [PRIVILEGES] at line 1, char 7`},
 {s: `GRANT BOGUS`, err: `found BOGUS, expected READ, WRITE, ALL [PRIVILEGES] at line 1, char 7`},
 {s: `GRANT READ`, err: `found EOF, expected ON at line 1, char 12`},
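
The new error cases walk the CREATE SUBSCRIPTION grammar one expected token at a time. For orientation only, a minimal sketch (not part of this diff) of a statement that satisfies every expectation in order, parsed through the vendored influxql package; the subscription name, database, retention policy, and destination URL are made-up values:

    package main

    import (
        "fmt"
        "log"

        "github.com/influxdb/influxdb/influxql"
    )

    func main() {
        // Supplies, in order: identifier, ON, "db"."rp", DESTINATIONS, ALL|ANY, destination string.
        stmt, err := influxql.ParseStatement(`CREATE SUBSCRIPTION "s0" ON "db"."rp" DESTINATIONS ALL 'udp://example.com:9090'`)
        if err != nil {
            log.Fatal(err)
        }
        s := stmt.(*influxql.CreateSubscriptionStatement)
        fmt.Println(s.Name, s.Database, s.RetentionPolicy, s.Mode, s.Destinations)
    }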
@@ -1639,11 +1687,15 @@ func TestParser_ParseStatement(t *testing.T) {
 {s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 0`, err: `invalid value 0: must be 1 <= n <= 2147483647 at line 1, char 67`},
 {s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION bad`, err: `found bad, expected number at line 1, char 67`},
 {s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 1 foo`, err: `found foo, expected DEFAULT at line 1, char 69`},
-{s: `ALTER`, err: `found EOF, expected RETENTION at line 1, char 7`},
+{s: `ALTER`, err: `found EOF, expected RETENTION, DATABASE at line 1, char 7`},
 {s: `ALTER RETENTION`, err: `found EOF, expected POLICY at line 1, char 17`},
 {s: `ALTER RETENTION POLICY`, err: `found EOF, expected identifier at line 1, char 24`},
 {s: `ALTER RETENTION POLICY policy1`, err: `found EOF, expected ON at line 1, char 32`}, {s: `ALTER RETENTION POLICY policy1 ON`, err: `found EOF, expected identifier at line 1, char 35`},
 {s: `ALTER RETENTION POLICY policy1 ON testdb`, err: `found EOF, expected DURATION, RETENTION, DEFAULT at line 1, char 42`},
+{s: `ALTER DATABASE`, err: `found EOF, expected identifier at line 1, char 16`},
+{s: `ALTER DATABASE db0`, err: `found EOF, expected RENAME at line 1, char 20`},
+{s: `ALTER DATABASE db0 RENAME`, err: `found EOF, expected TO at line 1, char 27`},
+{s: `ALTER DATABASE db0 RENAME TO`, err: `found EOF, expected identifier at line 1, char 30`},
 {s: `SET`, err: `found EOF, expected PASSWORD at line 1, char 5`},
 {s: `SET PASSWORD`, err: `found EOF, expected FOR at line 1, char 14`},
 {s: `SET PASSWORD something`, err: `found something, expected FOR at line 1, char 14`},
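
Likewise for ALTER DATABASE: the chain of errors ends once RENAME TO and the new identifier are present. A short sketch under the same assumptions as above (vendored import path, made-up database names):

    // Assumed imports: "fmt", "log", "github.com/influxdb/influxdb/influxql".
    func parseRename() {
        stmt, err := influxql.ParseStatement(`ALTER DATABASE db0 RENAME TO db1`)
        if err != nil {
            log.Fatal(err)
        }
        r := stmt.(*influxql.AlterDatabaseRenameStatement)
        fmt.Println(r.OldName, r.NewName) // db0 db1
    }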
@@ -1768,7 +1820,7 @@ func TestParser_ParseExpr(t *testing.T) {
 
 // Binary expression with regex.
 {
-s: "region =~ /us.*/",
+s: `region =~ /us.*/`,
 expr: &influxql.BinaryExpr{
 Op: influxql.EQREGEX,
 LHS: &influxql.VarRef{Val: "region"},
@@ -1776,6 +1828,16 @@ func TestParser_ParseExpr(t *testing.T) {
 },
 },
 
+// Binary expression with quoted '/' regex.
+{
+s: `url =~ /http\:\/\/www\.example\.com/`,
+expr: &influxql.BinaryExpr{
+Op: influxql.EQREGEX,
+LHS: &influxql.VarRef{Val: "url"},
+RHS: &influxql.RegexLiteral{Val: regexp.MustCompile(`http\://www\.example\.com`)},
+},
+},
+
 // Complex binary expression.
 {
 s: `value + 3 < 30 AND 1 + 2 OR true`,
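
The new case exercises regex literals whose delimiting slashes are escaped. As the scanner cases later in this diff show, `\/` inside a regex literal unescapes to a plain `/`, while other backslash sequences pass through to the compiled pattern. A small sketch (vendored import path assumed, URL is illustrative):

    // Assumed imports: "fmt", "log", "github.com/influxdb/influxdb/influxql".
    func parseRegex() {
        expr, err := influxql.ParseExpr(`url =~ /http\:\/\/www\.example\.com/`)
        if err != nil {
            log.Fatal(err)
        }
        be := expr.(*influxql.BinaryExpr)
        re := be.RHS.(*influxql.RegexLiteral)
        fmt.Println(re.Val.String()) // http\://www\.example\.com
    }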
@@ -2067,6 +2129,14 @@ func newAlterRetentionPolicyStatement(name string, DB string, d time.Duration, r
 return stmt
 }
 
+// newAlterDatabaseRenameStatement creates an initialized AlterDatabaseRenameStatement.
+func newAlterDatabaseRenameStatement(oldName, newName string) *influxql.AlterDatabaseRenameStatement {
+return &influxql.AlterDatabaseRenameStatement{
+OldName: oldName,
+NewName: newName,
+}
+}
+
 // mustMarshalJSON encodes a value to JSON.
 func mustMarshalJSON(v interface{}) []byte {
 b, err := json.Marshal(v)
@@ -150,6 +150,7 @@ func TestScanner_Scan(t *testing.T) {
 {s: `QUERIES`, tok: influxql.QUERIES},
 {s: `QUERY`, tok: influxql.QUERY},
 {s: `READ`, tok: influxql.READ},
+{s: `RENAME`, tok: influxql.RENAME},
 {s: `RETENTION`, tok: influxql.RETENTION},
 {s: `REVOKE`, tok: influxql.REVOKE},
 {s: `SELECT`, tok: influxql.SELECT},
@@ -276,6 +277,7 @@ func TestScanRegex(t *testing.T) {
 {in: `/foo\/bar/`, tok: influxql.REGEX, lit: `foo/bar`},
 {in: `/foo\\/bar/`, tok: influxql.REGEX, lit: `foo\/bar`},
 {in: `/foo\\bar/`, tok: influxql.REGEX, lit: `foo\\bar`},
+{in: `/http\:\/\/www\.example\.com/`, tok: influxql.REGEX, lit: `http\://www\.example\.com`},
 }
 
 for i, tt := range tests {
@@ -58,6 +58,7 @@ const (
 // Keywords
 ALL
 ALTER
+ANY
 AS
 ASC
 BEGIN
|
@ -69,6 +70,8 @@ const (
|
||||||
DEFAULT
|
DEFAULT
|
||||||
DELETE
|
DELETE
|
||||||
DESC
|
DESC
|
||||||
|
DESTINATIONS
|
||||||
|
DIAGNOSTICS
|
||||||
DISTINCT
|
DISTINCT
|
||||||
DROP
|
DROP
|
||||||
DURATION
|
DURATION
|
||||||
|
@ -104,6 +107,7 @@ const (
|
||||||
QUERIES
|
QUERIES
|
||||||
QUERY
|
QUERY
|
||||||
READ
|
READ
|
||||||
|
RENAME
|
||||||
REPLICATION
|
REPLICATION
|
||||||
RETENTION
|
RETENTION
|
||||||
REVOKE
|
REVOKE
|
||||||
|
@ -115,9 +119,10 @@ const (
|
||||||
SHOW
|
SHOW
|
||||||
SHARDS
|
SHARDS
|
||||||
SLIMIT
|
SLIMIT
|
||||||
STATS
|
|
||||||
DIAGNOSTICS
|
|
||||||
SOFFSET
|
SOFFSET
|
||||||
|
STATS
|
||||||
|
SUBSCRIPTION
|
||||||
|
SUBSCRIPTIONS
|
||||||
TAG
|
TAG
|
||||||
TO
|
TO
|
||||||
USER
|
USER
|
||||||
|
@@ -170,6 +175,7 @@ var tokens = [...]string{
 
 ALL: "ALL",
 ALTER: "ALTER",
+ANY: "ANY",
 AS: "AS",
 ASC: "ASC",
 BEGIN: "BEGIN",
@@ -181,8 +187,10 @@ var tokens = [...]string{
 DEFAULT: "DEFAULT",
 DELETE: "DELETE",
 DESC: "DESC",
-DROP: "DROP",
+DESTINATIONS: "DESTINATIONS",
+DIAGNOSTICS: "DIAGNOSTICS",
 DISTINCT: "DISTINCT",
+DROP: "DROP",
 DURATION: "DURATION",
 END: "END",
 EXISTS: "EXISTS",
@@ -216,6 +224,7 @@ var tokens = [...]string{
 QUERIES: "QUERIES",
 QUERY: "QUERY",
 READ: "READ",
+RENAME: "RENAME",
 REPLICATION: "REPLICATION",
 RETENTION: "RETENTION",
 REVOKE: "REVOKE",
@@ -229,7 +238,8 @@ var tokens = [...]string{
 SLIMIT: "SLIMIT",
 SOFFSET: "SOFFSET",
 STATS: "STATS",
-DIAGNOSTICS: "DIAGNOSTICS",
+SUBSCRIPTION: "SUBSCRIPTION",
+SUBSCRIPTIONS: "SUBSCRIPTIONS",
 TAG: "TAG",
 TO: "TO",
 USER: "USER",
@@ -1,7 +1,9 @@
 package meta
 
 import (
+"fmt"
 "sort"
+"strings"
 "time"
 
 "github.com/gogo/protobuf/proto"
@@ -177,6 +179,69 @@ func (data *Data) DropDatabase(name string) error {
 return ErrDatabaseNotFound
 }
 
+// RenameDatabase renames a database.
+// Returns an error if oldName or newName is blank
+// or if a database with the newName already exists
+// or if a database with oldName does not exist
+func (data *Data) RenameDatabase(oldName, newName string) error {
+if newName == "" || oldName == "" {
+return ErrDatabaseNameRequired
+}
+if data.Database(newName) != nil {
+return ErrDatabaseExists
+}
+if data.Database(oldName) == nil {
+return ErrDatabaseNotFound
+}
+// TODO should rename database in continuous queries also
+// for now, just return an error if there is a possible conflict
+if data.isDatabaseNameUsedInCQ(oldName) {
+return ErrDatabaseRenameCQConflict
+}
+// find database named oldName and rename it to newName
+for i := range data.Databases {
+if data.Databases[i].Name == oldName {
+data.Databases[i].Name = newName
+data.switchDatabaseUserPrivileges(oldName, newName)
+return nil
+}
+}
+return ErrDatabaseNotFound
+}
+
+// isDatabaseNameUsedInCQ returns true if a database name is used in any continuous query
+func (data *Data) isDatabaseNameUsedInCQ(dbName string) bool {
+CQOnDb := fmt.Sprintf(" ON %s ", dbName)
+CQIntoDb := fmt.Sprintf(" INTO \"%s\".", dbName)
+CQFromDb := fmt.Sprintf(" FROM \"%s\".", dbName)
+for i := range data.Databases {
+for j := range data.Databases[i].ContinuousQueries {
+query := data.Databases[i].ContinuousQueries[j].Query
+if strings.Contains(query, CQOnDb) {
+return true
+}
+if strings.Contains(query, CQIntoDb) {
+return true
+}
+if strings.Contains(query, CQFromDb) {
+return true
+}
+}
+}
+return false
+}
+
+// switchDatabaseUserPrivileges changes the database associated with user privileges
+func (data *Data) switchDatabaseUserPrivileges(oldDatabase, newDatabase string) error {
+for i := range data.Users {
+if p, ok := data.Users[i].Privileges[oldDatabase]; ok {
+data.Users[i].Privileges[newDatabase] = p
+delete(data.Users[i].Privileges, oldDatabase)
+}
+}
+return nil
+}
+
 // RetentionPolicy returns a retention policy for a database by name.
 func (data *Data) RetentionPolicy(database, name string) (*RetentionPolicyInfo, error) {
 di := data.Database(database)
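
Taken together, RenameDatabase validates both names, refuses to clobber an existing database, refuses to touch anything a continuous query might reference, and only then rewrites the DatabaseInfo entry plus any user privileges keyed on the old name. A minimal sketch of calling the new API on an in-memory Data snapshot (vendored import path and database names are illustrative):

    // Assumed imports: "log", "github.com/influxdb/influxdb/meta".
    func renameExample() {
        var data meta.Data
        if err := data.CreateDatabase("db0"); err != nil {
            log.Fatal(err)
        }
        // Fails with ErrDatabaseRenameCQConflict if any CQ text mentions "db0".
        if err := data.RenameDatabase("db0", "db1"); err != nil {
            log.Fatal(err)
        }
    }

Note that the conflict check is textual (strings.Contains over the CQ source), so it is deliberately conservative and can reject renames that would in fact be safe; the TODO in the code marks this as interim behavior.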
@@ -479,6 +544,49 @@ func (data *Data) DropContinuousQuery(database, name string) error {
 return ErrContinuousQueryNotFound
 }
 
+// CreateSubscription adds a named subscription to a database and retention policy.
+func (data *Data) CreateSubscription(database, rp, name, mode string, destinations []string) error {
+rpi, err := data.RetentionPolicy(database, rp)
+if err != nil {
+return err
+}
+if rpi == nil {
+return ErrRetentionPolicyNotFound
+}
+
+// Ensure the name doesn't already exist.
+for i := range rpi.Subscriptions {
+if rpi.Subscriptions[i].Name == name {
+return ErrSubscriptionExists
+}
+}
+
+// Append new query.
+rpi.Subscriptions = append(rpi.Subscriptions, SubscriptionInfo{
+Name: name,
+Mode: mode,
+Destinations: destinations,
+})
+
+return nil
+}
+
+// DropSubscription removes a subscription.
+func (data *Data) DropSubscription(database, rp, name string) error {
+rpi, err := data.RetentionPolicy(database, rp)
+if err != nil {
+return err
+}
+
+for i := range rpi.Subscriptions {
+if rpi.Subscriptions[i].Name == name {
+rpi.Subscriptions = append(rpi.Subscriptions[:i], rpi.Subscriptions[i+1:]...)
+return nil
+}
+}
+return ErrSubscriptionNotFound
+}
+
 // User returns a user by username.
 func (data *Data) User(username string) *UserInfo {
 for i := range data.Users {
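
CreateSubscription and DropSubscription follow the same shape as the continuous-query CRUD above: resolve the retention policy, enforce name uniqueness, then append to or splice out of the Subscriptions slice. A short usage sketch (names and destinations are illustrative, not from the diff):

    // Assumed imports: "log", "github.com/influxdb/influxdb/meta".
    func subscriptionExample() {
        var data meta.Data
        rpi := &meta.RetentionPolicyInfo{Name: "rp0", ReplicaN: 1}
        if err := data.CreateDatabase("db0"); err != nil {
            log.Fatal(err)
        }
        if err := data.CreateRetentionPolicy("db0", rpi); err != nil {
            log.Fatal(err)
        }
        if err := data.CreateSubscription("db0", "rp0", "s0", "ANY", []string{"udp://h0:1234"}); err != nil {
            log.Fatal(err)
        }
        if err := data.DropSubscription("db0", "rp0", "s0"); err != nil {
            log.Fatal(err)
        }
    }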
@@ -818,6 +926,7 @@ type RetentionPolicyInfo struct {
 Duration time.Duration
 ShardGroupDuration time.Duration
 ShardGroups []ShardGroupInfo
+Subscriptions []SubscriptionInfo
 }
 
 // NewRetentionPolicyInfo returns a new instance of RetentionPolicyInfo with defaults set.
@@ -894,6 +1003,12 @@ func (rpi *RetentionPolicyInfo) unmarshal(pb *internal.RetentionPolicyInfo) {
 rpi.ShardGroups[i].unmarshal(x)
 }
 }
+if len(pb.GetSubscriptions()) > 0 {
+rpi.Subscriptions = make([]SubscriptionInfo, len(pb.GetSubscriptions()))
+for i, x := range pb.GetSubscriptions() {
+rpi.Subscriptions[i].unmarshal(x)
+}
+}
 }
 
 // clone returns a deep copy of rpi.
@@ -1078,6 +1193,39 @@ func (si *ShardInfo) unmarshal(pb *internal.ShardInfo) {
 }
 }
 
+type SubscriptionInfo struct {
+Name string
+Mode string
+Destinations []string
+}
+
+// marshal serializes to a protobuf representation.
+func (si SubscriptionInfo) marshal() *internal.SubscriptionInfo {
+pb := &internal.SubscriptionInfo{
+Name: proto.String(si.Name),
+Mode: proto.String(si.Mode),
+}
+
+pb.Destinations = make([]string, len(si.Destinations))
+for i := range si.Destinations {
+pb.Destinations[i] = si.Destinations[i]
+}
+return pb
+}
+
+// unmarshal deserializes from a protobuf representation.
+func (si *SubscriptionInfo) unmarshal(pb *internal.SubscriptionInfo) {
+si.Name = pb.GetName()
+si.Mode = pb.GetMode()
+
+if len(pb.GetDestinations()) > 0 {
+si.Destinations = make([]string, len(pb.GetDestinations()))
+for i, h := range pb.GetDestinations() {
+si.Destinations[i] = h
+}
+}
+}
+
 // ShardOwner represents a node that owns a shard.
 type ShardOwner struct {
 NodeID uint64
@@ -135,6 +135,97 @@ func TestData_DropDatabase(t *testing.T) {
 }
 }
 
+// Ensure a database can be renamed.
+func TestData_RenameDatabase(t *testing.T) {
+var data meta.Data
+for i := 0; i < 2; i++ {
+if err := data.CreateDatabase(fmt.Sprintf("db%d", i)); err != nil {
+t.Fatal(err)
+}
+}
+
+if err := data.RenameDatabase("db1", "db2"); err != nil {
+t.Fatal(err)
+} else if !reflect.DeepEqual(data.Databases, []meta.DatabaseInfo{{Name: "db0"}, {Name: "db2"}}) {
+t.Fatalf("unexpected databases: %#v", data.Databases)
+}
+}
+
+// Ensure that user privileges are updated correctly when database is renamed.
+func TestData_RenameDatabaseUpdatesPrivileges(t *testing.T) {
+var data meta.Data
+for i := 0; i < 2; i++ {
+if err := data.CreateDatabase(fmt.Sprintf("db%d", i)); err != nil {
+t.Fatal(err)
+}
+}
+
+data.Users = []meta.UserInfo{{
+Name: "susy",
+Hash: "ABC123",
+Admin: true,
+Privileges: map[string]influxql.Privilege{
+"db1": influxql.AllPrivileges, "db0": influxql.ReadPrivilege}}}
+
+if err := data.RenameDatabase("db1", "db2"); err != nil {
+t.Fatal(err)
+} else if !reflect.DeepEqual(data.Users,
+[]meta.UserInfo{{
+Name: "susy",
+Hash: "ABC123",
+Admin: true,
+Privileges: map[string]influxql.Privilege{
+"db2": influxql.AllPrivileges, "db0": influxql.ReadPrivilege}}}) {
+t.Fatalf("unexpected user privileges: %#v", data.Users)
+}
+}
+
+// Ensure that renaming a database without both old and new names returns an error.
+func TestData_RenameDatabase_ErrNameRequired(t *testing.T) {
+var data meta.Data
+if err := data.RenameDatabase("", ""); err != meta.ErrDatabaseNameRequired {
+t.Fatalf("unexpected error: %s", err)
+}
+if err := data.RenameDatabase("from_foo", ""); err != meta.ErrDatabaseNameRequired {
+t.Fatalf("unexpected error: %s", err)
+}
+if err := data.RenameDatabase("", "to_foo"); err != meta.ErrDatabaseNameRequired {
+t.Fatalf("unexpected error: %s", err)
+}
+}
+
+// Ensure that renaming a database returns an error if there is a possibly conflicting CQ
+func TestData_RenameDatabase_ErrDatabaseCQConflict(t *testing.T) {
+var data meta.Data
+if err := data.CreateDatabase("db0"); err != nil {
+t.Fatal(err)
+} else if err := data.CreateDatabase("db1"); err != nil {
+t.Fatal(err)
+} else if err := data.CreateContinuousQuery("db0", "cq0", `CREATE CONTINUOUS QUERY cq0 ON db0 BEGIN SELECT count() INTO "foo"."default"."bar" FROM "foo"."foobar" END`); err != nil {
+t.Fatal(err)
+} else if err := data.CreateContinuousQuery("db1", "cq1", `CREATE CONTINUOUS QUERY cq1 ON db1 BEGIN SELECT count() INTO "db1"."default"."bar" FROM "db0"."foobar" END`); err != nil {
+t.Fatal(err)
+} else if err := data.CreateContinuousQuery("db1", "cq2", `CREATE CONTINUOUS QUERY cq2 ON db1 BEGIN SELECT count() INTO "db0"."default"."bar" FROM "db1"."foobar" END`); err != nil {
+t.Fatal(err)
+} else if err := data.CreateContinuousQuery("db1", "noconflict", `CREATE CONTINUOUS QUERY noconflict ON db1 BEGIN SELECT count() INTO "db1"."default"."bar" FROM "db1"."foobar" END`); err != nil {
+t.Fatal(err)
+} else if err := data.RenameDatabase("db0", "db2"); err == nil {
+t.Fatalf("unexpected rename database success despite cq conflict")
+} else if err := data.DropContinuousQuery("db0", "cq0"); err != nil {
+t.Fatal(err)
+} else if err := data.RenameDatabase("db0", "db2"); err == nil {
+t.Fatalf("unexpected rename database success despite cq conflict")
+} else if err := data.DropContinuousQuery("db1", "cq1"); err != nil {
+t.Fatal(err)
+} else if err := data.RenameDatabase("db0", "db2"); err == nil {
+t.Fatalf("unexpected rename database success despite cq conflict")
+} else if err := data.DropContinuousQuery("db1", "cq2"); err != nil {
+t.Fatal(err)
+} else if err := data.RenameDatabase("db0", "db2"); err != nil {
+t.Fatal(err)
+}
+}
+
 // Ensure a retention policy can be created.
 func TestData_CreateRetentionPolicy(t *testing.T) {
 data := meta.Data{Nodes: []meta.NodeInfo{{ID: 1}, {ID: 2}}}
@@ -513,6 +604,52 @@ func TestData_DropContinuousQuery(t *testing.T) {
 }
 }
 
+// Ensure a subscription can be created.
+func TestData_CreateSubscription(t *testing.T) {
+var data meta.Data
+rpi := &meta.RetentionPolicyInfo{
+Name: "rp0",
+ReplicaN: 3,
+}
+if err := data.CreateDatabase("db0"); err != nil {
+t.Fatal(err)
+} else if err := data.CreateRetentionPolicy("db0", rpi); err != nil {
+t.Fatal(err)
+} else if err := data.CreateSubscription("db0", "rp0", "s0", "ANY", []string{"udp://h0:1234", "udp://h1:1234"}); err != nil {
+t.Fatal(err)
+} else if !reflect.DeepEqual(data.Databases[0].RetentionPolicies[0].Subscriptions, []meta.SubscriptionInfo{
+{Name: "s0", Mode: "ANY", Destinations: []string{"udp://h0:1234", "udp://h1:1234"}},
+}) {
+t.Fatalf("unexpected subscriptions: %#v", data.Databases[0].RetentionPolicies[0].Subscriptions)
+}
+}
+
+// Ensure a subscription can be removed.
+func TestData_DropSubscription(t *testing.T) {
+var data meta.Data
+rpi := &meta.RetentionPolicyInfo{
+Name: "rp0",
+ReplicaN: 3,
+}
+if err := data.CreateDatabase("db0"); err != nil {
+t.Fatal(err)
+} else if err := data.CreateRetentionPolicy("db0", rpi); err != nil {
+t.Fatal(err)
+} else if err := data.CreateSubscription("db0", "rp0", "s0", "ANY", []string{"udp://h0:1234", "udp://h1:1234"}); err != nil {
+t.Fatal(err)
+} else if err := data.CreateSubscription("db0", "rp0", "s1", "ALL", []string{"udp://h0:1234", "udp://h1:1234"}); err != nil {
+t.Fatal(err)
+}
+
+if err := data.DropSubscription("db0", "rp0", "s0"); err != nil {
+t.Fatal(err)
+} else if !reflect.DeepEqual(data.Databases[0].RetentionPolicies[0].Subscriptions, []meta.SubscriptionInfo{
+{Name: "s1", Mode: "ALL", Destinations: []string{"udp://h0:1234", "udp://h1:1234"}},
+}) {
+t.Fatalf("unexpected subscriptions: %#v", data.Databases[0].RetentionPolicies[0].Subscriptions)
+}
+}
+
 // Ensure a user can be created.
 func TestData_CreateUser(t *testing.T) {
 var data meta.Data
@@ -47,6 +47,9 @@ var (
 
 // ErrDatabaseNameRequired is returned when creating a database without a name.
 ErrDatabaseNameRequired = newError("database name required")
+
+// ErrDatabaseRenameCQConflict is returned when attempting to rename a database in use by a CQ.
+ErrDatabaseRenameCQConflict = newError("database rename conflict with existing continuous query")
 )
 
 var (
@@ -97,6 +100,14 @@ var (
 ErrContinuousQueryNotFound = newError("continuous query not found")
 )
 
+var (
+// ErrSubscriptionExists is returned when creating an already existing subscription.
+ErrSubscriptionExists = newError("subscription already exists")
+
+// ErrSubscriptionNotFound is returned when removing a subscription that doesn't exist.
+ErrSubscriptionNotFound = newError("subscription not found")
+)
+
 var (
 // ErrUserExists is returned when creating an already existing user.
 ErrUserExists = newError("user already exists")
@@ -15,6 +15,7 @@ It has these top-level messages:
 RetentionPolicyInfo
 ShardGroupInfo
 ShardInfo
+SubscriptionInfo
 ShardOwner
 ContinuousQueryInfo
 UserInfo
@@ -39,6 +40,9 @@ It has these top-level messages:
 SetDataCommand
 SetAdminPrivilegeCommand
 UpdateNodeCommand
+RenameDatabaseCommand
+CreateSubscriptionCommand
+DropSubscriptionCommand
 Response
 ResponseHeader
 ErrorResponse
@@ -116,6 +120,9 @@ const (
 Command_SetDataCommand Command_Type = 17
 Command_SetAdminPrivilegeCommand Command_Type = 18
 Command_UpdateNodeCommand Command_Type = 19
+Command_RenameDatabaseCommand Command_Type = 20
+Command_CreateSubscriptionCommand Command_Type = 22
+Command_DropSubscriptionCommand Command_Type = 23
 )
 
 var Command_Type_name = map[int32]string{
@@ -138,6 +145,9 @@ var Command_Type_name = map[int32]string{
 17: "SetDataCommand",
 18: "SetAdminPrivilegeCommand",
 19: "UpdateNodeCommand",
+20: "RenameDatabaseCommand",
+22: "CreateSubscriptionCommand",
+23: "DropSubscriptionCommand",
 }
 var Command_Type_value = map[string]int32{
 "CreateNodeCommand": 1,
@@ -159,6 +169,9 @@ var Command_Type_value = map[string]int32{
 "SetDataCommand": 17,
 "SetAdminPrivilegeCommand": 18,
 "UpdateNodeCommand": 19,
+"RenameDatabaseCommand": 20,
+"CreateSubscriptionCommand": 22,
+"DropSubscriptionCommand": 23,
 }
 
 func (x Command_Type) Enum() *Command_Type {
@@ -328,6 +341,7 @@ type RetentionPolicyInfo struct {
 ShardGroupDuration *int64 `protobuf:"varint,3,req,name=ShardGroupDuration" json:"ShardGroupDuration,omitempty"`
 ReplicaN *uint32 `protobuf:"varint,4,req,name=ReplicaN" json:"ReplicaN,omitempty"`
 ShardGroups []*ShardGroupInfo `protobuf:"bytes,5,rep,name=ShardGroups" json:"ShardGroups,omitempty"`
+Subscriptions []*SubscriptionInfo `protobuf:"bytes,6,rep,name=Subscriptions" json:"Subscriptions,omitempty"`
 XXX_unrecognized []byte `json:"-"`
 }
 
@@ -370,6 +384,13 @@ func (m *RetentionPolicyInfo) GetShardGroups() []*ShardGroupInfo {
 return nil
 }
 
+func (m *RetentionPolicyInfo) GetSubscriptions() []*SubscriptionInfo {
+if m != nil {
+return m.Subscriptions
+}
+return nil
+}
+
 type ShardGroupInfo struct {
 ID *uint64 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"`
 StartTime *int64 `protobuf:"varint,2,req,name=StartTime" json:"StartTime,omitempty"`
@@ -450,6 +471,38 @@ func (m *ShardInfo) GetOwners() []*ShardOwner {
 return nil
 }
 
+type SubscriptionInfo struct {
+Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"`
+Mode *string `protobuf:"bytes,2,req,name=Mode" json:"Mode,omitempty"`
+Destinations []string `protobuf:"bytes,3,rep,name=Destinations" json:"Destinations,omitempty"`
+XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SubscriptionInfo) Reset() { *m = SubscriptionInfo{} }
+func (m *SubscriptionInfo) String() string { return proto.CompactTextString(m) }
+func (*SubscriptionInfo) ProtoMessage() {}
+
+func (m *SubscriptionInfo) GetName() string {
+if m != nil && m.Name != nil {
+return *m.Name
+}
+return ""
+}
+
+func (m *SubscriptionInfo) GetMode() string {
+if m != nil && m.Mode != nil {
+return *m.Mode
+}
+return ""
+}
+
+func (m *SubscriptionInfo) GetDestinations() []string {
+if m != nil {
+return m.Destinations
+}
+return nil
+}
+
 type ShardOwner struct {
 NodeID *uint64 `protobuf:"varint,1,req,name=NodeID" json:"NodeID,omitempty"`
 XXX_unrecognized []byte `json:"-"`
@@ -1225,6 +1278,134 @@ var E_UpdateNodeCommand_Command = &proto.ExtensionDesc{
 Tag: "bytes,119,opt,name=command",
 }
 
+type RenameDatabaseCommand struct {
+OldName *string `protobuf:"bytes,1,req,name=oldName" json:"oldName,omitempty"`
+NewName *string `protobuf:"bytes,2,req,name=newName" json:"newName,omitempty"`
+XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RenameDatabaseCommand) Reset() { *m = RenameDatabaseCommand{} }
+func (m *RenameDatabaseCommand) String() string { return proto.CompactTextString(m) }
+func (*RenameDatabaseCommand) ProtoMessage() {}
+
+func (m *RenameDatabaseCommand) GetOldName() string {
+if m != nil && m.OldName != nil {
+return *m.OldName
+}
+return ""
+}
+
+func (m *RenameDatabaseCommand) GetNewName() string {
+if m != nil && m.NewName != nil {
+return *m.NewName
+}
+return ""
+}
+
+var E_RenameDatabaseCommand_Command = &proto.ExtensionDesc{
+ExtendedType: (*Command)(nil),
+ExtensionType: (*RenameDatabaseCommand)(nil),
+Field: 120,
+Name: "internal.RenameDatabaseCommand.command",
+Tag: "bytes,120,opt,name=command",
+}
+
+type CreateSubscriptionCommand struct {
+Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"`
+Database *string `protobuf:"bytes,2,req,name=Database" json:"Database,omitempty"`
+RetentionPolicy *string `protobuf:"bytes,3,req,name=RetentionPolicy" json:"RetentionPolicy,omitempty"`
+Mode *string `protobuf:"bytes,4,req,name=Mode" json:"Mode,omitempty"`
+Destinations []string `protobuf:"bytes,5,rep,name=Destinations" json:"Destinations,omitempty"`
+XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateSubscriptionCommand) Reset() { *m = CreateSubscriptionCommand{} }
+func (m *CreateSubscriptionCommand) String() string { return proto.CompactTextString(m) }
+func (*CreateSubscriptionCommand) ProtoMessage() {}
+
+func (m *CreateSubscriptionCommand) GetName() string {
+if m != nil && m.Name != nil {
+return *m.Name
+}
+return ""
+}
+
+func (m *CreateSubscriptionCommand) GetDatabase() string {
+if m != nil && m.Database != nil {
+return *m.Database
+}
+return ""
+}
+
+func (m *CreateSubscriptionCommand) GetRetentionPolicy() string {
+if m != nil && m.RetentionPolicy != nil {
+return *m.RetentionPolicy
+}
+return ""
+}
+
+func (m *CreateSubscriptionCommand) GetMode() string {
+if m != nil && m.Mode != nil {
+return *m.Mode
+}
+return ""
+}
+
+func (m *CreateSubscriptionCommand) GetDestinations() []string {
+if m != nil {
+return m.Destinations
+}
+return nil
+}
+
+var E_CreateSubscriptionCommand_Command = &proto.ExtensionDesc{
+ExtendedType: (*Command)(nil),
+ExtensionType: (*CreateSubscriptionCommand)(nil),
+Field: 121,
+Name: "internal.CreateSubscriptionCommand.command",
+Tag: "bytes,121,opt,name=command",
+}
+
+type DropSubscriptionCommand struct {
+Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"`
+Database *string `protobuf:"bytes,2,req,name=Database" json:"Database,omitempty"`
+RetentionPolicy *string `protobuf:"bytes,3,req,name=RetentionPolicy" json:"RetentionPolicy,omitempty"`
+XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DropSubscriptionCommand) Reset() { *m = DropSubscriptionCommand{} }
+func (m *DropSubscriptionCommand) String() string { return proto.CompactTextString(m) }
+func (*DropSubscriptionCommand) ProtoMessage() {}
+
+func (m *DropSubscriptionCommand) GetName() string {
+if m != nil && m.Name != nil {
+return *m.Name
+}
+return ""
+}
+
+func (m *DropSubscriptionCommand) GetDatabase() string {
+if m != nil && m.Database != nil {
+return *m.Database
+}
+return ""
+}
+
+func (m *DropSubscriptionCommand) GetRetentionPolicy() string {
+if m != nil && m.RetentionPolicy != nil {
+return *m.RetentionPolicy
+}
+return ""
+}
+
+var E_DropSubscriptionCommand_Command = &proto.ExtensionDesc{
+ExtendedType: (*Command)(nil),
+ExtensionType: (*DropSubscriptionCommand)(nil),
+Field: 122,
+Name: "internal.DropSubscriptionCommand.command",
+Tag: "bytes,122,opt,name=command",
+}
+
 type Response struct {
 OK *bool `protobuf:"varint,1,req,name=OK" json:"OK,omitempty"`
 Error *string `protobuf:"bytes,2,opt,name=Error" json:"Error,omitempty"`
@@ -1453,4 +1634,7 @@ func init() {
 proto.RegisterExtension(E_SetDataCommand_Command)
 proto.RegisterExtension(E_SetAdminPrivilegeCommand_Command)
 proto.RegisterExtension(E_UpdateNodeCommand_Command)
+proto.RegisterExtension(E_RenameDatabaseCommand_Command)
+proto.RegisterExtension(E_CreateSubscriptionCommand_Command)
+proto.RegisterExtension(E_DropSubscriptionCommand_Command)
 }
@@ -38,6 +38,7 @@ message RetentionPolicyInfo {
 required int64 ShardGroupDuration = 3;
 required uint32 ReplicaN = 4;
 repeated ShardGroupInfo ShardGroups = 5;
+repeated SubscriptionInfo Subscriptions = 6;
 }
 
 message ShardGroupInfo {
@@ -54,6 +55,12 @@ message ShardInfo {
 repeated ShardOwner Owners = 3;
 }
 
+message SubscriptionInfo{
+required string Name = 1;
+required string Mode = 2;
+repeated string Destinations = 3;
+}
+
 message ShardOwner {
 required uint64 NodeID = 1;
 }
@@ -105,6 +112,9 @@ message Command {
 SetDataCommand = 17;
 SetAdminPrivilegeCommand = 18;
 UpdateNodeCommand = 19;
+RenameDatabaseCommand = 20;
+CreateSubscriptionCommand = 22;
+DropSubscriptionCommand = 23;
 }
 
 required Type type = 1;
@@ -266,6 +276,35 @@ message UpdateNodeCommand {
 required string Host = 2;
 }
 
+message RenameDatabaseCommand {
+extend Command {
+optional RenameDatabaseCommand command = 120;
+}
+required string oldName = 1;
+required string newName = 2;
+}
+
+message CreateSubscriptionCommand {
+extend Command {
+optional CreateSubscriptionCommand command = 121;
+}
+required string Name = 1;
+required string Database = 2;
+required string RetentionPolicy = 3;
+required string Mode = 4;
+repeated string Destinations = 5;
+
+}
+
+message DropSubscriptionCommand {
+extend Command {
+optional DropSubscriptionCommand command = 122;
+}
+required string Name = 1;
+required string Database = 2;
+required string RetentionPolicy = 3;
+}
+
 message Response {
 required bool OK = 1;
 optional string Error = 2;
@@ -78,6 +78,8 @@ func (r *localRaft) updateMetaData(ms *Data) {
 r.store.Logger.Printf("Updating metastore to term=%v index=%v", ms.Term, ms.Index)
 r.store.mu.Lock()
 r.store.data = ms
+// Signal any blocked goroutines that the meta store has been updated
+r.store.notifyChanged()
 r.store.mu.Unlock()
 }
 }
@@ -366,6 +368,8 @@ func (r *remoteRaft) updateMetaData(ms *Data) {
 r.store.Logger.Printf("Updating metastore to term=%v index=%v", ms.Term, ms.Index)
 r.store.mu.Lock()
 r.store.data = ms
+// Signal any blocked goroutines that the meta store has been updated
+r.store.notifyChanged()
 r.store.mu.Unlock()
 }
 }
@@ -23,6 +23,7 @@ type StatementExecutor struct {
 Databases() ([]DatabaseInfo, error)
 CreateDatabase(name string) (*DatabaseInfo, error)
 DropDatabase(name string) error
+RenameDatabase(oldName, newName string) error
 
 DefaultRetentionPolicy(database string) (*RetentionPolicyInfo, error)
 CreateRetentionPolicy(database string, rpi *RetentionPolicyInfo) (*RetentionPolicyInfo, error)
@@ -41,6 +42,9 @@ type StatementExecutor struct {
 
 CreateContinuousQuery(database, name, query string) error
 DropContinuousQuery(database, name string) error
+
+CreateSubscription(database, rp, name, mode string, destinations []string) error
+DropSubscription(database, rp, name string) error
 }
 }
 
@@ -69,6 +73,8 @@ func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement) *influxql.
 return e.executeGrantStatement(stmt)
 case *influxql.GrantAdminStatement:
 return e.executeGrantAdminStatement(stmt)
+case *influxql.AlterDatabaseRenameStatement:
+return e.executeAlterDatabaseRenameStatement(stmt)
 case *influxql.RevokeStatement:
 return e.executeRevokeStatement(stmt)
 case *influxql.RevokeAdminStatement:
@@ -93,6 +99,12 @@
 return e.executeShowStatsStatement(stmt)
 case *influxql.DropServerStatement:
 return e.executeDropServerStatement(stmt)
+case *influxql.CreateSubscriptionStatement:
+return e.executeCreateSubscriptionStatement(stmt)
+case *influxql.DropSubscriptionStatement:
+return e.executeDropSubscriptionStatement(stmt)
+case *influxql.ShowSubscriptionsStatement:
+return e.executeShowSubscriptionsStatement(stmt)
 default:
 panic(fmt.Sprintf("unsupported statement type: %T", stmt))
 }
@@ -212,6 +224,10 @@ func (e *StatementExecutor) executeGrantAdminStatement(stmt *influxql.GrantAdmin
 return &influxql.Result{Err: e.Store.SetAdminPrivilege(stmt.User, true)}
 }
 
+func (e *StatementExecutor) executeAlterDatabaseRenameStatement(q *influxql.AlterDatabaseRenameStatement) *influxql.Result {
+return &influxql.Result{Err: e.Store.RenameDatabase(q.OldName, q.NewName)}
+}
+
 func (e *StatementExecutor) executeRevokeStatement(stmt *influxql.RevokeStatement) *influxql.Result {
 priv := influxql.NoPrivileges
 
@@ -319,6 +335,39 @@ func (e *StatementExecutor) executeShowContinuousQueriesStatement(stmt *influxql
 return &influxql.Result{Series: rows}
 }
 
+func (e *StatementExecutor) executeCreateSubscriptionStatement(q *influxql.CreateSubscriptionStatement) *influxql.Result {
+return &influxql.Result{
+Err: e.Store.CreateSubscription(q.Database, q.RetentionPolicy, q.Name, q.Mode, q.Destinations),
+}
+}
+
+func (e *StatementExecutor) executeDropSubscriptionStatement(q *influxql.DropSubscriptionStatement) *influxql.Result {
+return &influxql.Result{
+Err: e.Store.DropSubscription(q.Database, q.RetentionPolicy, q.Name),
+}
+}
+
+func (e *StatementExecutor) executeShowSubscriptionsStatement(stmt *influxql.ShowSubscriptionsStatement) *influxql.Result {
+dis, err := e.Store.Databases()
+if err != nil {
+return &influxql.Result{Err: err}
+}
+
+rows := []*models.Row{}
+for _, di := range dis {
+row := &models.Row{Columns: []string{"retention_policy", "name", "mode", "destinations"}, Name: di.Name}
+for _, rpi := range di.RetentionPolicies {
+for _, si := range rpi.Subscriptions {
+row.Values = append(row.Values, []interface{}{rpi.Name, si.Name, si.Mode, si.Destinations})
+}
+}
+if len(row.Values) > 0 {
+rows = append(rows, row)
+}
+}
+return &influxql.Result{Series: rows}
+}
+
 func (e *StatementExecutor) executeShowShardsStatement(stmt *influxql.ShowShardsStatement) *influxql.Result {
 dis, err := e.Store.Databases()
 if err != nil {
Godeps/_workspace/src/github.com/influxdb/influxdb/meta/statement_executor_test.go (generated, vendored; 188 changes)
@@ -46,6 +46,26 @@ func TestStatementExecutor_ExecuteStatement_DropDatabase(t *testing.T) {
 }
 }
 
+// Ensure an ALTER DATABASE ... RENAME TO ... statement can be executed.
+func TestStatementExecutor_ExecuteStatement_AlterDatabaseRename(t *testing.T) {
+e := NewStatementExecutor()
+e.Store.RenameDatabaseFn = func(oldName, newName string) error {
+if oldName != "old_foo" {
+t.Fatalf("unexpected name: %s", oldName)
+}
+if newName != "new_foo" {
+t.Fatalf("unexpected name: %s", newName)
+}
+return nil
+}
+
+if res := e.ExecuteStatement(influxql.MustParseStatement(`ALTER DATABASE old_foo RENAME TO new_foo`)); res.Err != nil {
+t.Fatal(res.Err)
+} else if res.Series != nil {
+t.Fatalf("unexpected rows: %#v", res.Series)
+}
+}
+
 // Ensure a SHOW DATABASES statement can be executed.
 func TestStatementExecutor_ExecuteStatement_ShowDatabases(t *testing.T) {
 e := NewStatementExecutor()
@ -786,6 +806,159 @@ func TestStatementExecutor_ExecuteStatement_ShowContinuousQueries_Err(t *testing
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Ensure a CREATE SUBSCRIPTION statement can be executed.
|
||||||
|
func TestStatementExecutor_ExecuteStatement_CreateSubscription(t *testing.T) {
|
||||||
|
e := NewStatementExecutor()
|
||||||
|
e.Store.CreateSubscriptionFn = func(database, rp, name, mode string, destinations []string) error {
|
||||||
|
if database != "db0" {
|
||||||
|
t.Fatalf("unexpected database: %s", database)
|
||||||
|
} else if rp != "rp0" {
|
||||||
|
t.Fatalf("unexpected rp: %s", rp)
|
||||||
|
} else if name != "s0" {
|
||||||
|
t.Fatalf("unexpected name: %s", name)
|
||||||
|
} else if mode != "ANY" {
|
||||||
|
t.Fatalf("unexpected mode: %s", mode)
|
||||||
|
} else if len(destinations) != 2 {
|
||||||
|
t.Fatalf("unexpected destinations: %s", destinations)
|
||||||
|
} else if destinations[0] != "udp://h0:1234" {
|
||||||
|
t.Fatalf("unexpected destinations[0]: %s", destinations[0])
|
||||||
|
} else if destinations[1] != "udp://h1:1234" {
|
||||||
|
t.Fatalf("unexpected destinations[1]: %s", destinations[1])
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
stmt := influxql.MustParseStatement(`CREATE SUBSCRIPTION s0 ON db0.rp0 DESTINATIONS ANY 'udp://h0:1234', 'udp://h1:1234'`)
|
||||||
|
if res := e.ExecuteStatement(stmt); res.Err != nil {
|
||||||
|
t.Fatal(res.Err)
|
||||||
|
} else if res.Series != nil {
|
+		t.Fatalf("unexpected rows: %#v", res.Series)
+	}
+}
+
+// Ensure a CREATE SUBSCRIPTION statement can return an error from the store.
+func TestStatementExecutor_ExecuteStatement_CreateSubscription_Err(t *testing.T) {
+	e := NewStatementExecutor()
+	e.Store.CreateSubscriptionFn = func(database, rp, name, mode string, destinations []string) error {
+		return errors.New("marker")
+	}
+
+	stmt := influxql.MustParseStatement(`CREATE SUBSCRIPTION s0 ON db0.rp0 DESTINATIONS ANY 'udp://h0:1234', 'udp://h1:1234'`)
+	if res := e.ExecuteStatement(stmt); res.Err == nil || res.Err.Error() != "marker" {
+		t.Fatalf("unexpected error: %s", res.Err)
+	}
+}
+
+// Ensure a DROP SUBSCRIPTION statement can be executed.
+func TestStatementExecutor_ExecuteStatement_DropSubscription(t *testing.T) {
+	e := NewStatementExecutor()
+	e.Store.DropSubscriptionFn = func(database, rp, name string) error {
+		if database != "db0" {
+			t.Fatalf("unexpected database: %s", database)
+		} else if rp != "rp0" {
+			t.Fatalf("unexpected rp: %s", rp)
+		} else if name != "s0" {
+			t.Fatalf("unexpected name: %s", name)
+		}
+		return nil
+	}
+
+	stmt := influxql.MustParseStatement(`DROP SUBSCRIPTION s0 ON db0.rp0`)
+	if res := e.ExecuteStatement(stmt); res.Err != nil {
+		t.Fatal(res.Err)
+	} else if res.Series != nil {
+		t.Fatalf("unexpected rows: %#v", res.Series)
+	}
+}
+
+// Ensure a DROP SUBSCRIPTION statement can return an error from the store.
+func TestStatementExecutor_ExecuteStatement_DropSubscription_Err(t *testing.T) {
+	e := NewStatementExecutor()
+	e.Store.DropSubscriptionFn = func(database, rp, name string) error {
+		return errors.New("marker")
+	}
+
+	stmt := influxql.MustParseStatement(`DROP SUBSCRIPTION s0 ON db0.rp0`)
+	if res := e.ExecuteStatement(stmt); res.Err == nil || res.Err.Error() != "marker" {
+		t.Fatalf("unexpected error: %s", res.Err)
+	}
+}
+
+// Ensure a SHOW SUBSCRIPTIONS statement can be executed.
+func TestStatementExecutor_ExecuteStatement_ShowSubscriptions(t *testing.T) {
+	e := NewStatementExecutor()
+	e.Store.DatabasesFn = func() ([]meta.DatabaseInfo, error) {
+		return []meta.DatabaseInfo{
+			{
+				Name: "db0",
+				RetentionPolicies: []meta.RetentionPolicyInfo{
+					{
+						Name: "rp0",
+						Subscriptions: []meta.SubscriptionInfo{
+							{Name: "s0", Mode: "ALL", Destinations: []string{"udp://h0:1234", "udp://h1:1234"}},
+							{Name: "s1", Mode: "ANY", Destinations: []string{"udp://h2:1234", "udp://h3:1234"}},
+						},
+					},
+					{
+						Name: "rp1",
+						Subscriptions: []meta.SubscriptionInfo{
+							{Name: "s2", Mode: "ALL", Destinations: []string{"udp://h4:1234", "udp://h5:1234"}},
+						},
+					},
+				},
+			},
+			{
+				Name: "db1",
+				RetentionPolicies: []meta.RetentionPolicyInfo{
+					{
+						Name: "rp2",
+						Subscriptions: []meta.SubscriptionInfo{
+							{Name: "s3", Mode: "ANY", Destinations: []string{"udp://h6:1234", "udp://h7:1234"}},
+						},
+					},
+				},
+			},
+		}, nil
+	}
+
+	stmt := influxql.MustParseStatement(`SHOW SUBSCRIPTIONS`)
+	if res := e.ExecuteStatement(stmt); res.Err != nil {
+		t.Fatal(res.Err)
+	} else if !reflect.DeepEqual(res.Series, models.Rows{
+		{
+			Name:    "db0",
+			Columns: []string{"retention_policy", "name", "mode", "destinations"},
+			Values: [][]interface{}{
+				{"rp0", "s0", "ALL", []string{"udp://h0:1234", "udp://h1:1234"}},
+				{"rp0", "s1", "ANY", []string{"udp://h2:1234", "udp://h3:1234"}},
+				{"rp1", "s2", "ALL", []string{"udp://h4:1234", "udp://h5:1234"}},
+			},
+		},
+		{
+			Name:    "db1",
+			Columns: []string{"retention_policy", "name", "mode", "destinations"},
+			Values: [][]interface{}{
+				{"rp2", "s3", "ANY", []string{"udp://h6:1234", "udp://h7:1234"}},
+			},
+		},
+	}) {
+		t.Fatalf("unexpected rows: %s", spew.Sdump(res.Series))
+	}
+}
+
+// Ensure a SHOW SUBSCRIPTIONS statement can return an error from the store.
+func TestStatementExecutor_ExecuteStatement_ShowSubscriptions_Err(t *testing.T) {
+	e := NewStatementExecutor()
+	e.Store.DatabasesFn = func() ([]meta.DatabaseInfo, error) {
+		return nil, errors.New("marker")
+	}
+
+	stmt := influxql.MustParseStatement(`SHOW SUBSCRIPTIONS`)
+	if res := e.ExecuteStatement(stmt); res.Err == nil || res.Err.Error() != "marker" {
+		t.Fatal(res.Err)
+	}
+}
+
 // Ensure that executing an unsupported statement will panic.
 func TestStatementExecutor_ExecuteStatement_Unsupported(t *testing.T) {
 	var panicked bool
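The mock-store tests above drive `ExecuteStatement` with parsed InfluxQL. For reference, the subscription statements they exercise can be round-tripped through the parser on their own; a minimal sketch with no mocks involved (`MustParseStatement` appears in the tests themselves; printing via the `Statement` interface's `String` method):

```go
package main

import (
	"fmt"

	"github.com/influxdb/influxdb/influxql"
)

func main() {
	// Parse the statements used in the tests above and print the parser's
	// canonical rendering of each one.
	for _, q := range []string{
		`CREATE SUBSCRIPTION s0 ON db0.rp0 DESTINATIONS ANY 'udp://h0:1234', 'udp://h1:1234'`,
		`DROP SUBSCRIPTION s0 ON db0.rp0`,
		`SHOW SUBSCRIPTIONS`,
	} {
		stmt := influxql.MustParseStatement(q)
		fmt.Println(stmt.String())
	}
}
```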
@@ -883,6 +1056,7 @@ type StatementExecutorStore struct {
 	CreateDatabaseFn         func(name string) (*meta.DatabaseInfo, error)
 	DropDatabaseFn           func(name string) error
 	DeleteNodeFn             func(nodeID uint64, force bool) error
+	RenameDatabaseFn         func(oldName, newName string) error
 	DefaultRetentionPolicyFn func(database string) (*meta.RetentionPolicyInfo, error)
 	CreateRetentionPolicyFn  func(database string, rpi *meta.RetentionPolicyInfo) (*meta.RetentionPolicyInfo, error)
 	UpdateRetentionPolicyFn  func(database, name string, rpu *meta.RetentionPolicyUpdate) error

@@ -899,6 +1073,8 @@ type StatementExecutorStore struct {
 	ContinuousQueriesFn     func() ([]meta.ContinuousQueryInfo, error)
 	CreateContinuousQueryFn func(database, name, query string) error
 	DropContinuousQueryFn   func(database, name string) error
+	CreateSubscriptionFn    func(database, rp, name, typ string, hosts []string) error
+	DropSubscriptionFn      func(database, rp, name string) error
 }

 func (s *StatementExecutorStore) Node(id uint64) (*meta.NodeInfo, error) {

@@ -940,6 +1116,10 @@ func (s *StatementExecutorStore) DropDatabase(name string) error {
 	return s.DropDatabaseFn(name)
 }

+func (s *StatementExecutorStore) RenameDatabase(oldName, newName string) error {
+	return s.RenameDatabaseFn(oldName, newName)
+}
+
 func (s *StatementExecutorStore) DefaultRetentionPolicy(database string) (*meta.RetentionPolicyInfo, error) {
 	return s.DefaultRetentionPolicyFn(database)
 }

@@ -1003,3 +1183,11 @@ func (s *StatementExecutorStore) CreateContinuousQuery(database, name, query str
 func (s *StatementExecutorStore) DropContinuousQuery(database, name string) error {
 	return s.DropContinuousQueryFn(database, name)
 }
+
+func (s *StatementExecutorStore) CreateSubscription(database, rp, name, typ string, hosts []string) error {
+	return s.CreateSubscriptionFn(database, rp, name, typ, hosts)
+}
+
+func (s *StatementExecutorStore) DropSubscription(database, rp, name string) error {
+	return s.DropSubscriptionFn(database, rp, name)
+}
@@ -927,6 +927,16 @@ func (s *Store) DropDatabase(name string) error {
 	)
 }

+// RenameDatabase renames a database in the metastore
+func (s *Store) RenameDatabase(oldName, newName string) error {
+	return s.exec(internal.Command_RenameDatabaseCommand, internal.E_RenameDatabaseCommand_Command,
+		&internal.RenameDatabaseCommand{
+			OldName: proto.String(oldName),
+			NewName: proto.String(newName),
+		},
+	)
+}
+
 // RetentionPolicy returns a retention policy for a database by name.
 func (s *Store) RetentionPolicy(database, name string) (rpi *RetentionPolicyInfo, err error) {
 	err = s.read(func(data *Data) error {

@@ -1201,6 +1211,30 @@ func (s *Store) DropContinuousQuery(database, name string) error {
 	)
 }

+// CreateSubscription creates a new subscription on the store.
+func (s *Store) CreateSubscription(database, rp, name, mode string, destinations []string) error {
+	return s.exec(internal.Command_CreateSubscriptionCommand, internal.E_CreateSubscriptionCommand_Command,
+		&internal.CreateSubscriptionCommand{
+			Database:        proto.String(database),
+			RetentionPolicy: proto.String(rp),
+			Name:            proto.String(name),
+			Mode:            proto.String(mode),
+			Destinations:    destinations,
+		},
+	)
+}
+
+// DropSubscription removes a subscription from the store.
+func (s *Store) DropSubscription(database, rp, name string) error {
+	return s.exec(internal.Command_DropSubscriptionCommand, internal.E_DropSubscriptionCommand_Command,
+		&internal.DropSubscriptionCommand{
+			Database:        proto.String(database),
+			RetentionPolicy: proto.String(rp),
+			Name:            proto.String(name),
+		},
+	)
+}
+
 // User returns a user by name.
 func (s *Store) User(name string) (ui *UserInfo, err error) {
 	err = s.read(func(data *Data) error {
@@ -1602,6 +1636,14 @@ func (s *Store) SetHashPasswordFn(fn HashPasswordFn) {
 	s.hashPassword = fn
 }

+// notifyChanged will close the changed channel, which broadcasts to all waiting
+// goroutines that the meta store has been updated. Callers are responsible for locking
+// the meta store before calling this.
+func (s *Store) notifyChanged() {
+	close(s.changed)
+	s.changed = make(chan struct{})
+}
+
 // storeFSM represents the finite state machine used by Store to interact with Raft.
 type storeFSM Store
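The close-and-remake channel in `notifyChanged` is the standard Go broadcast idiom: closing a channel releases every goroutine blocked on it at once, and the fresh channel is picked up by the next round of waiters. A self-contained sketch of both sides of that idiom (illustrative names, not the meta package's API):

```go
package main

import (
	"fmt"
	"sync"
)

type notifier struct {
	mu      sync.Mutex
	changed chan struct{}
}

// WaitForChange returns the channel that will be closed on the next notify.
func (n *notifier) WaitForChange() <-chan struct{} {
	n.mu.Lock()
	defer n.mu.Unlock()
	return n.changed
}

// Notify releases all current waiters and resets for the next round.
func (n *notifier) Notify() {
	n.mu.Lock()
	defer n.mu.Unlock()
	close(n.changed) // wakes every goroutine blocked on the old channel
	n.changed = make(chan struct{})
}

func main() {
	n := &notifier{changed: make(chan struct{})}
	var ready, done sync.WaitGroup
	for i := 0; i < 3; i++ {
		ready.Add(1)
		done.Add(1)
		go func(id int) {
			defer done.Done()
			ch := n.WaitForChange() // grab the current channel first
			ready.Done()
			<-ch // blocks until Notify closes it
			fmt.Println("waiter", id, "woke up")
		}(i)
	}
	ready.Wait() // every waiter now holds the old channel
	n.Notify()
	done.Wait()
}
```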
@@ -1626,6 +1668,8 @@ func (fsm *storeFSM) Apply(l *raft.Log) interface{} {
 		return fsm.applyCreateDatabaseCommand(&cmd)
 	case internal.Command_DropDatabaseCommand:
 		return fsm.applyDropDatabaseCommand(&cmd)
+	case internal.Command_RenameDatabaseCommand:
+		return fsm.applyRenameDatabaseCommand(&cmd)
 	case internal.Command_CreateRetentionPolicyCommand:
 		return fsm.applyCreateRetentionPolicyCommand(&cmd)
 	case internal.Command_DropRetentionPolicyCommand:

@@ -1642,6 +1686,10 @@ func (fsm *storeFSM) Apply(l *raft.Log) interface{} {
 		return fsm.applyCreateContinuousQueryCommand(&cmd)
 	case internal.Command_DropContinuousQueryCommand:
 		return fsm.applyDropContinuousQueryCommand(&cmd)
+	case internal.Command_CreateSubscriptionCommand:
+		return fsm.applyCreateSubscriptionCommand(&cmd)
+	case internal.Command_DropSubscriptionCommand:
+		return fsm.applyDropSubscriptionCommand(&cmd)
 	case internal.Command_CreateUserCommand:
 		return fsm.applyCreateUserCommand(&cmd)
 	case internal.Command_DropUserCommand:

@@ -1664,8 +1712,7 @@ func (fsm *storeFSM) Apply(l *raft.Log) interface{} {
 	// Copy term and index to new metadata.
 	fsm.data.Term = l.Term
 	fsm.data.Index = l.Index
-	close(s.changed)
-	s.changed = make(chan struct{})
+	s.notifyChanged()

 	return err
 }
@@ -1751,6 +1798,20 @@ func (fsm *storeFSM) applyDropDatabaseCommand(cmd *internal.Command) interface{}
 	return nil
 }

+func (fsm *storeFSM) applyRenameDatabaseCommand(cmd *internal.Command) interface{} {
+	ext, _ := proto.GetExtension(cmd, internal.E_RenameDatabaseCommand_Command)
+	v := ext.(*internal.RenameDatabaseCommand)
+
+	// Copy data and update.
+	other := fsm.data.Clone()
+	if err := other.RenameDatabase(v.GetOldName(), v.GetNewName()); err != nil {
+		return err
+	}
+	fsm.data = other
+
+	return nil
+}
+
 func (fsm *storeFSM) applyCreateRetentionPolicyCommand(cmd *internal.Command) interface{} {
 	ext, _ := proto.GetExtension(cmd, internal.E_CreateRetentionPolicyCommand_Command)
 	v := ext.(*internal.CreateRetentionPolicyCommand)
@@ -1881,6 +1942,34 @@ func (fsm *storeFSM) applyDropContinuousQueryCommand(cmd *internal.Command) inte
 	return nil
 }

+func (fsm *storeFSM) applyCreateSubscriptionCommand(cmd *internal.Command) interface{} {
+	ext, _ := proto.GetExtension(cmd, internal.E_CreateSubscriptionCommand_Command)
+	v := ext.(*internal.CreateSubscriptionCommand)
+
+	// Copy data and update.
+	other := fsm.data.Clone()
+	if err := other.CreateSubscription(v.GetDatabase(), v.GetRetentionPolicy(), v.GetName(), v.GetMode(), v.GetDestinations()); err != nil {
+		return err
+	}
+	fsm.data = other
+
+	return nil
+}
+
+func (fsm *storeFSM) applyDropSubscriptionCommand(cmd *internal.Command) interface{} {
+	ext, _ := proto.GetExtension(cmd, internal.E_DropSubscriptionCommand_Command)
+	v := ext.(*internal.DropSubscriptionCommand)
+
+	// Copy data and update.
+	other := fsm.data.Clone()
+	if err := other.DropSubscription(v.GetDatabase(), v.GetRetentionPolicy(), v.GetName()); err != nil {
+		return err
+	}
+	fsm.data = other
+
+	return nil
+}
+
 func (fsm *storeFSM) applyCreateUserCommand(cmd *internal.Command) interface{} {
 	ext, _ := proto.GetExtension(cmd, internal.E_CreateUserCommand_Command)
 	v := ext.(*internal.CreateUserCommand)
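The apply functions above share a copy-on-write shape: clone the metadata, mutate the clone, and only swap it into `fsm.data` when the command succeeds, so a failing command leaves the FSM state untouched. A generic sketch of that pattern (the types here are illustrative, not the meta package's):

```go
package main

import (
	"errors"
	"fmt"
)

type state struct {
	databases map[string]bool
}

func (s *state) clone() *state {
	c := &state{databases: make(map[string]bool, len(s.databases))}
	for k, v := range s.databases {
		c.databases[k] = v
	}
	return c
}

type fsm struct {
	data *state
}

// applyRename mutates a clone and swaps it in only when the command succeeds.
func (f *fsm) applyRename(oldName, newName string) error {
	other := f.data.clone()
	if !other.databases[oldName] {
		return errors.New("database not found") // f.data is untouched
	}
	delete(other.databases, oldName)
	other.databases[newName] = true
	f.data = other
	return nil
}

func main() {
	f := &fsm{data: &state{databases: map[string]bool{"db0": true}}}
	err := f.applyRename("db0", "db1")
	fmt.Println(err, f.data.databases) // <nil> map[db1:true]
	err = f.applyRename("nope", "db2")
	fmt.Println(err, f.data.databases) // database not found map[db1:true]
}
```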
@@ -244,6 +244,76 @@ func TestStore_DropDatabase_ErrDatabaseNotFound(t *testing.T) {
 	}
 }

+// Ensure the store can rename an existing database.
+func TestStore_RenameDatabase(t *testing.T) {
+	t.Parallel()
+	s := MustOpenStore()
+	defer s.Close()
+
+	// Create three databases.
+	for i := 0; i < 3; i++ {
+		if _, err := s.CreateDatabase(fmt.Sprintf("db%d", i)); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// Rename database db1, leaving db0 and db2 unchanged.
+	if err := s.RenameDatabase("db1", "db3"); err != nil {
+		t.Fatal(err)
+	}
+
+	// Ensure the databases are correct.
+	exp := &meta.DatabaseInfo{Name: "db0"}
+	if di, _ := s.Database("db0"); !reflect.DeepEqual(di, exp) {
+		t.Fatalf("unexpected database(0): \ngot: %#v\nexp: %#v", di, exp)
+	}
+	if di, _ := s.Database("db1"); di != nil {
+		t.Fatalf("unexpected database(1): %#v", di)
+	}
+
+	exp = &meta.DatabaseInfo{Name: "db2"}
+	if di, _ := s.Database("db2"); !reflect.DeepEqual(di, exp) {
+		t.Fatalf("unexpected database(2): \ngot: %#v\nexp: %#v", di, exp)
+	}
+
+	exp = &meta.DatabaseInfo{Name: "db3"}
+	if di, _ := s.Database("db3"); !reflect.DeepEqual(di, exp) {
+		t.Fatalf("unexpected database(3): \ngot: %#v\nexp: %#v", di, exp)
+	}
+}
+
+// Ensure the store returns an error when renaming a database that doesn't exist.
+func TestStore_RenameDatabase_ErrDatabaseNotFound(t *testing.T) {
+	t.Parallel()
+	s := MustOpenStore()
+	defer s.Close()
+
+	if err := s.RenameDatabase("no_such_database", "another_database"); err != meta.ErrDatabaseNotFound {
+		t.Fatalf("unexpected error: %s", err)
+	}
+}
+
+// Ensure the store returns an error when renaming a database to a database that already exists.
+func TestStore_RenameDatabase_ErrDatabaseExists(t *testing.T) {
+	t.Parallel()
+	s := MustOpenStore()
+	defer s.Close()
+
+	// Create two databases.
+	if _, err := s.CreateDatabase("db00"); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := s.CreateDatabase("db01"); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := s.RenameDatabase("db00", "db01"); err != meta.ErrDatabaseExists {
+		t.Fatalf("unexpected error: %s", err)
+	}
+}
+
 // Ensure the store can create a retention policy on a database.
 func TestStore_CreateRetentionPolicy(t *testing.T) {
 	t.Parallel()
@@ -649,6 +719,90 @@ func TestStore_DropContinuousQuery(t *testing.T) {
 	}
 }

+// Ensure the store can create a new subscription.
+func TestStore_CreateSubscription(t *testing.T) {
+	t.Parallel()
+	s := MustOpenStore()
+	defer s.Close()
+
+	// Create subscription.
+	rpi := &meta.RetentionPolicyInfo{
+		Name:     "rp0",
+		ReplicaN: 3,
+	}
+	if _, err := s.CreateDatabase("db0"); err != nil {
+		t.Fatal(err)
+	} else if _, err := s.CreateRetentionPolicy("db0", rpi); err != nil {
+		t.Fatal(err)
+	} else if err := s.CreateSubscription("db0", "rp0", "s0", "t0", []string{"h0", "h1"}); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// Ensure that creating an existing subscription returns an error.
+func TestStore_CreateSubscription_ErrSubscriptionExists(t *testing.T) {
+	t.Parallel()
+	s := MustOpenStore()
+	defer s.Close()
+
+	// Create subscription.
+	rpi := &meta.RetentionPolicyInfo{
+		Name:     "rp0",
+		ReplicaN: 3,
+	}
+	if _, err := s.CreateDatabase("db0"); err != nil {
+		t.Fatal(err)
+	} else if _, err := s.CreateRetentionPolicy("db0", rpi); err != nil {
+		t.Fatal(err)
+	} else if err := s.CreateSubscription("db0", "rp0", "s0", "t0", []string{"h0", "h1"}); err != nil {
+		t.Fatal(err)
+	}
+
+	// Create it again.
+	if err := s.CreateSubscription("db0", "rp0", "s0", "t0", []string{"h0", "h1"}); err != meta.ErrSubscriptionExists {
+		t.Fatalf("unexpected error: %s", err)
+	}
+}
+
+// Ensure the store can delete a subscription.
+func TestStore_DropSubscription(t *testing.T) {
+	t.Parallel()
+	s := MustOpenStore()
+	defer s.Close()
+
+	// Create subscriptions.
+	rpi := &meta.RetentionPolicyInfo{
+		Name:     "rp0",
+		ReplicaN: 3,
+	}
+	if _, err := s.CreateDatabase("db0"); err != nil {
+		t.Fatal(err)
+	} else if _, err := s.CreateRetentionPolicy("db0", rpi); err != nil {
+		t.Fatal(err)
+	} else if err := s.CreateSubscription("db0", "rp0", "s0", "ANY", []string{"udp://h0:1234", "udp://h1:1234"}); err != nil {
+		t.Fatal(err)
+	} else if err := s.CreateSubscription("db0", "rp0", "s1", "ALL", []string{"udp://h0:1234", "udp://h1:1234"}); err != nil {
+		t.Fatal(err)
+	} else if err := s.CreateSubscription("db0", "rp0", "s2", "ANY", []string{"udp://h0:1234", "udp://h1:1234"}); err != nil {
+		t.Fatal(err)
+	}
+
+	// Remove one of the subscriptions.
+	if err := s.DropSubscription("db0", "rp0", "s0"); err != nil {
+		t.Fatal(err)
+	}
+
+	// Ensure the resulting subscriptions are correct.
+	if rpi, err := s.RetentionPolicy("db0", "rp0"); err != nil {
+		t.Fatal(err)
+	} else if !reflect.DeepEqual(rpi.Subscriptions, []meta.SubscriptionInfo{
+		{Name: "s1", Mode: "ALL", Destinations: []string{"udp://h0:1234", "udp://h1:1234"}},
+		{Name: "s2", Mode: "ANY", Destinations: []string{"udp://h0:1234", "udp://h1:1234"}},
+	}) {
+		t.Fatalf("unexpected subscriptions: %#v", rpi.Subscriptions)
+	}
+}
+
 // Ensure the store can create a user.
 func TestStore_CreateUser(t *testing.T) {
 	t.Parallel()
@@ -207,7 +207,7 @@ func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, err
 		if err != nil {
 			return nil, err
 		}
-		pt.time = time.Unix(0, ts*pt.GetPrecisionMultiplier(precision))
+		pt.time = time.Unix(0, ts*pt.GetPrecisionMultiplier(precision)).UTC()
 	}
 	return pt, nil
 }
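For context on the `.UTC()` change above: `time.Unix` returns a `Time` in the local zone, and `.UTC()` changes only the presentation zone, never the instant, so parsed points print and compare consistently regardless of the server's zone. A quick standard-library illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	ts := int64(1444234982) // seconds since the epoch
	local := time.Unix(0, ts*int64(time.Second))
	utc := local.UTC()

	// Same instant, different zone: Equal is true even if String differs.
	fmt.Println(local.Equal(utc)) // true
	fmt.Println(utc.Location())   // UTC
}
```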
@@ -248,28 +248,28 @@ func scanKey(buf []byte, i int) (int, []byte, error) {
 			break
 		}

-		// equals is special in the tags section.  It must be escaped if part of a tag name or value.
+		// equals is special in the tags section.  It must be escaped if part of a tag key or value.
 		// It does not need to be escaped if part of the measurement.
 		if buf[i] == '=' && commas > 0 {
 			if i-1 < 0 || i-2 < 0 {
-				return i, buf[start:i], fmt.Errorf("missing tag name")
+				return i, buf[start:i], fmt.Errorf("missing tag key")
 			}

 			// Check for "cpu,=value" but allow "cpu,a\,=value"
 			if buf[i-1] == ',' && buf[i-2] != '\\' {
-				return i, buf[start:i], fmt.Errorf("missing tag name")
+				return i, buf[start:i], fmt.Errorf("missing tag key")
 			}

 			// Check for "cpu,\ =value"
 			if buf[i-1] == ' ' && buf[i-2] != '\\' {
-				return i, buf[start:i], fmt.Errorf("missing tag name")
+				return i, buf[start:i], fmt.Errorf("missing tag key")
 			}

 			i += 1
 			equals += 1

-			// Check for "cpu,a=1,b= value=1"
-			if i < len(buf) && buf[i] == ' ' {
+			// Check for "cpu,a=1,b= value=1" or "cpu,a=1,b=,c=foo value=1"
+			if i < len(buf) && (buf[i] == ' ' || buf[i] == ',') {
 				return i, buf[start:i], fmt.Errorf("missing tag value")
 			}
 			continue
@@ -459,12 +459,12 @@ func scanFields(buf []byte, i int) (int, []byte, error) {

 		// check for "... =123" but allow "a\ =123"
 		if buf[i-1] == ' ' && buf[i-2] != '\\' {
-			return i, buf[start:i], fmt.Errorf("missing field name")
+			return i, buf[start:i], fmt.Errorf("missing field key")
 		}

 		// check for "...a=123,=456" but allow "a=123,a\,=456"
 		if buf[i-1] == ',' && buf[i-2] != '\\' {
-			return i, buf[start:i], fmt.Errorf("missing field name")
+			return i, buf[start:i], fmt.Errorf("missing field key")
 		}

 		// check for "... value="
@@ -597,14 +597,14 @@ func scanNumber(buf []byte, i int) (int, error) {
 		}

 		// `e` is valid for floats but not as the first char
-		if i > start && (buf[i] == 'e') {
+		if i > start && (buf[i] == 'e' || buf[i] == 'E') {
 			scientific = true
 			i += 1
 			continue
 		}

 		// + and - are only valid at this point if they follow an e (scientific notation)
-		if (buf[i] == '+' || buf[i] == '-') && buf[i-1] == 'e' {
+		if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
 			i += 1
 			continue
 		}
@@ -198,7 +198,6 @@ func TestParsePointNoFields(t *testing.T) {
 	if err == nil {
 		t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, "cpu,,, value=1")
 	}
-
 }

 func TestParsePointNoTimestamp(t *testing.T) {

@@ -212,7 +211,7 @@ func TestParsePointMissingQuote(t *testing.T) {
 	}
 }

-func TestParsePointMissingTagName(t *testing.T) {
+func TestParsePointMissingTagKey(t *testing.T) {
 	_, err := models.ParsePointsString(`cpu,host=serverA,=us-east value=1i`)
 	if err == nil {
 		t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,=us-east value=1i`)

@@ -248,6 +247,10 @@ func TestParsePointMissingTagValue(t *testing.T) {
 	if err == nil {
 		t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region= value=1i`)
 	}
+	_, err = models.ParsePointsString(`cpu,host=serverA,region=,zone=us-west value=1i`)
+	if err == nil {
+		t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=,zone=us-west value=1i`)
+	}
 }

 func TestParsePointMissingFieldName(t *testing.T) {

@@ -269,7 +272,6 @@ func TestParsePointMissingFieldName(t *testing.T) {
 	if err == nil {
 		t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=123i,=456i`)
 	}
-
 }

 func TestParsePointMissingFieldValue(t *testing.T) {

@@ -468,7 +470,22 @@ func TestParsePointFloatScientific(t *testing.T) {
 	if pts[0].Fields()["value"] != 1e4 {
 		t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1e4`, err)
 	}
+}
+
+func TestParsePointFloatScientificUpper(t *testing.T) {
+	_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0E4`)
+	if err != nil {
+		t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0E4`, err)
+	}
+
+	pts, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1E4`)
+	if err != nil {
+		t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1E4`, err)
+	}
+
+	if pts[0].Fields()["value"] != 1e4 {
+		t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1E4`, err)
+	}
 }

 func TestParsePointFloatScientificDecimal(t *testing.T) {

@@ -543,7 +560,7 @@ func TestParsePointUnescape(t *testing.T) {
 	test(t, `cpu,region\,zone=east value=1.0`,
 		models.NewPoint("cpu",
 			models.Tags{
-				"region,zone": "east", // comma in the tag name
+				"region,zone": "east", // comma in the tag key
 			},
 			models.Fields{
 				"value": 1.0,

@@ -554,7 +571,7 @@ func TestParsePointUnescape(t *testing.T) {
 	test(t, `cpu,region\ zone=east value=1.0`,
 		models.NewPoint("cpu",
 			models.Tags{
-				"region zone": "east", // comma in the tag name
+				"region zone": "east", // space in the tag key
 			},
 			models.Fields{
 				"value": 1.0,

@@ -583,25 +600,25 @@ func TestParsePointUnescape(t *testing.T) {
 		},
 		time.Unix(0, 0)))

-	// commas in field names
+	// commas in field keys
 	test(t, `cpu,regions=east value\,ms=1.0`,
 		models.NewPoint("cpu",
 			models.Tags{
 				"regions": "east",
 			},
 			models.Fields{
-				"value,ms": 1.0, // comma in the field name
+				"value,ms": 1.0, // comma in the field key
 			},
 			time.Unix(0, 0)))

-	// spaces in field names
+	// spaces in field keys
 	test(t, `cpu,regions=east value\ ms=1.0`,
 		models.NewPoint("cpu",
 			models.Tags{
 				"regions": "east",
 			},
 			models.Fields{
-				"value ms": 1.0, // comma in the field name
+				"value ms": 1.0, // space in the field key
 			},
 			time.Unix(0, 0)))

@@ -640,7 +657,7 @@ func TestParsePointUnescape(t *testing.T) {
 		},
 		time.Unix(0, 0)))

-	// field name using escape char.
+	// field key using escape char.
 	test(t, `cpu \a=1i`,
 		models.NewPoint(
 			"cpu",
|
||||||
|
|
||||||
points := make(models.Points, 0, len(stats))
|
points := make(models.Points, 0, len(stats))
|
||||||
for _, s := range stats {
|
for _, s := range stats {
|
||||||
points = append(points, models.NewPoint(s.Name, s.Tags, s.Values, time.Now()))
|
points = append(points, models.NewPoint(s.Name, s.Tags, s.Values, time.Now().Truncate(time.Second)))
|
||||||
}
|
}
|
||||||
|
|
||||||
err = m.PointsWriter.WritePoints(&cluster.WritePointsRequest{
|
err = m.PointsWriter.WritePoints(&cluster.WritePointsRequest{
|
||||||
|
|
|
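`Truncate(time.Second)` above rounds the statistics timestamp down to whole seconds, so self-monitoring points don't carry spurious sub-second precision. Its behavior in isolation:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	ts := time.Date(2015, 10, 7, 12, 30, 45, 987654321, time.UTC)
	fmt.Println(ts.Truncate(time.Second)) // 2015-10-07 12:30:45 +0000 UTC
}
```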
@@ -68,6 +68,8 @@ GOPATH_INSTALL=
 BINS=(
     influxd
     influx
+    influx_stress
+    influx_inspect
 )

 ###########################################################################

@@ -284,6 +286,8 @@ rm -f $INSTALL_ROOT_DIR/influx
 rm -f $INSTALL_ROOT_DIR/init.sh
 ln -s $INSTALL_ROOT_DIR/versions/$version/influxd $INSTALL_ROOT_DIR/influxd
 ln -s $INSTALL_ROOT_DIR/versions/$version/influx $INSTALL_ROOT_DIR/influx
+ln -s $INSTALL_ROOT_DIR/versions/$version/influx_inspect $INSTALL_ROOT_DIR/influx_inspect
+ln -s $INSTALL_ROOT_DIR/versions/$version/influx_stress $INSTALL_ROOT_DIR/influx_stress
 ln -s $INSTALL_ROOT_DIR/versions/$version/scripts/init.sh $INSTALL_ROOT_DIR/init.sh

 if ! id influxdb >/dev/null 2>&1; then

@@ -467,7 +471,7 @@ if [ $? -ne 0 ]; then
     cleanup_exit 1
 fi

-cp $LOGROTATE $TMP_WORK_DIR/$LOGROTATE_DIR/influxd
+install -m 644 $LOGROTATE $TMP_WORK_DIR/$LOGROTATE_DIR/influxdb
 if [ $? -ne 0 ]; then
     echo "Failed to copy logrotate configuration to packaging directory -- aborting."
     cleanup_exit 1
Godeps/_workspace/src/github.com/influxdb/influxdb/pkg/escape/bytes_test.go (generated, vendored; new file, 45 lines)

@@ -0,0 +1,45 @@
+package escape
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestUnescape(t *testing.T) {
+	tests := []struct {
+		in  []byte
+		out []byte
+	}{
+		{
+			[]byte(nil),
+			[]byte(nil),
+		},
+
+		{
+			[]byte(""),
+			[]byte(nil),
+		},
+
+		{
+			[]byte("\\,\\\"\\ \\="),
+			[]byte(",\" ="),
+		},
+
+		{
+			[]byte("\\\\"),
+			[]byte("\\\\"),
+		},
+
+		{
+			[]byte("plain and simple"),
+			[]byte("plain and simple"),
+		},
+	}
+
+	for ii, tt := range tests {
+		got := Unescape(tt.in)
+		if !reflect.DeepEqual(got, tt.out) {
+			t.Errorf("[%d] Unescape(%#v) = %#v, expected %#v", ii, string(tt.in), string(got), string(tt.out))
+		}
+	}
+}
Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/README.md (generated, vendored; new file, 15 lines)

@@ -0,0 +1,15 @@
+# The collectd Input
+
+The _collectd_ input allows InfluxDB to accept data transmitted in collectd native format. This data is transmitted over UDP.
+
+## Configuration
+
+Each collectd input allows the binding address, target database, and target retention policy to be set. If the database does not exist, it will be created automatically when the input is initialized. If the retention policy is not configured, the default retention policy for the database is used. However, if the retention policy is set, it must be created explicitly; the input will not create it automatically.
+
+Each collectd input also performs internal batching of the points it receives, as batched writes to the database are more efficient. The default batch size is 1000, the pending batch factor is 5, and the batch timeout is 1 second. This means the input will write batches of at most 1000 points, but if a batch has not reached 1000 points within 1 second of the first point being added, it will emit that batch regardless of size. The pending batch factor controls how many batches can be in memory at once, allowing the input to transmit one batch while still building others.
+
+The path to the collectd types database file may also be set.
+
+## Large UDP packets
+
+Please note that UDP packets larger than the standard size of 1452 bytes are dropped at the time of ingestion, so be sure to set `MaxPacketSize` to 1452 in the collectd configuration.
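The size-or-timeout batching the README describes is a generic pattern. A minimal sketch of it in Go, independent of the actual collectd service implementation (all names below are illustrative, not the package's API):

```go
package main

import (
	"fmt"
	"time"
)

// batcher flushes when size points have accumulated, or when timeout has
// elapsed since the first point of the current batch arrived.
func batcher(in <-chan string, out chan<- []string, size int, timeout time.Duration) {
	var batch []string
	var timer <-chan time.Time // nil channel: never fires until armed
	for {
		select {
		case p, ok := <-in:
			if !ok {
				if len(batch) > 0 {
					out <- batch // flush the partial batch on shutdown
				}
				close(out)
				return
			}
			if batch == nil {
				timer = time.After(timeout) // start the clock on the first point
			}
			batch = append(batch, p)
			if len(batch) >= size {
				out <- batch
				batch, timer = nil, nil
			}
		case <-timer:
			out <- batch // timeout hit: emit a partial batch
			batch, timer = nil, nil
		}
	}
}

func main() {
	in := make(chan string)
	out := make(chan []string, 5) // buffer plays the role of the pending batch factor
	go batcher(in, out, 3, time.Second)
	for i := 0; i < 4; i++ {
		in <- fmt.Sprintf("point%d", i)
	}
	close(in)
	for b := range out {
		fmt.Println(len(b), b) // a full batch of 3, then a partial batch of 1
	}
}
```

The buffered output channel is what lets the input keep building new batches while previously flushed ones are still waiting to be written.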
Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/service.go (generated, vendored; 116 changed lines)

@@ -11,10 +11,8 @@ import (
 	"time"

 	"github.com/influxdb/influxdb"
-	"github.com/influxdb/influxdb/cluster"
 	"github.com/influxdb/influxdb/influxql"
 	"github.com/influxdb/influxdb/meta"
-	"github.com/influxdb/influxdb/models"
 	"github.com/influxdb/influxdb/tsdb"
 )

@@ -48,11 +46,6 @@ type metaStore interface {
 	Database(name string) (*meta.DatabaseInfo, error)
 }

-// pointsWriter is an internal interface to make testing easier.
-type pointsWriter interface {
-	WritePoints(p *cluster.WritePointsRequest) error
-}
-
 // RunRequest is a request to run one or more CQs.
 type RunRequest struct {
 	// Now tells the CQ service what the current time is.

@@ -79,7 +72,6 @@ func (rr *RunRequest) matches(cq *meta.ContinuousQueryInfo) bool {
 type Service struct {
 	MetaStore     metaStore
 	QueryExecutor queryExecutor
-	PointsWriter  pointsWriter
 	Config        *Config
 	RunInterval   time.Duration
 	// RunCh can be used by clients to signal service to run CQs.

@@ -119,7 +111,6 @@ func (s *Service) Open() error {

 	assert(s.MetaStore != nil, "MetaStore is nil")
 	assert(s.QueryExecutor != nil, "QueryExecutor is nil")
-	assert(s.PointsWriter != nil, "PointsWriter is nil")

 	s.stop = make(chan struct{})
 	s.wg = &sync.WaitGroup{}

@@ -331,103 +322,16 @@ func (s *Service) runContinuousQueryAndWriteResult(cq *ContinuousQuery) error {
 	if err != nil {
 		return err
 	}
-	// Read all rows from the result channel.
-	points := make([]models.Point, 0, 100)
-	for result := range ch {
-		if result.Err != nil {
-			return result.Err
-		}
-
-		for _, row := range result.Series {
-			// Get the measurement name for the result.
-			measurement := cq.intoMeasurement()
-			if measurement == "" {
-				measurement = row.Name
-			}
-			// Convert the result row to points.
-			part, err := s.convertRowToPoints(measurement, row)
-			if err != nil {
-				log.Println(err)
-				continue
-			}
-
-			if len(part) == 0 {
-				continue
-			}
-
-			// If the points have any nil values, can't write.
-			// This happens if the CQ is created and running before data is written to the measurement.
-			for _, p := range part {
-				fields := p.Fields()
-				for _, v := range fields {
-					if v == nil {
-						return nil
-					}
-				}
-			}
-
-			points = append(points, part...)
-		}
-	}
-
-	if len(points) == 0 {
-		return nil
-	}
-
-	// Create a write request for the points.
-	req := &cluster.WritePointsRequest{
-		Database:         cq.intoDB(),
-		RetentionPolicy:  cq.intoRP(),
-		ConsistencyLevel: cluster.ConsistencyLevelAny,
-		Points:           points,
-	}
-
-	// Write the request.
-	if err := s.PointsWriter.WritePoints(req); err != nil {
-		s.Logger.Println(err)
-		return err
-	}
-
-	s.statMap.Add(statPointsWritten, int64(len(points)))
-	if s.loggingEnabled {
-		s.Logger.Printf("wrote %d point(s) to %s.%s", len(points), cq.intoDB(), cq.intoRP())
-	}
-
-	return nil
-}
-
-// convertRowToPoints will convert a query result Row into Points that can be written back in.
-// Used for continuous and INTO queries
-func (s *Service) convertRowToPoints(measurementName string, row *models.Row) ([]models.Point, error) {
-	// figure out which parts of the result are the time and which are the fields
-	timeIndex := -1
-	fieldIndexes := make(map[string]int)
-	for i, c := range row.Columns {
-		if c == "time" {
-			timeIndex = i
-		} else {
-			fieldIndexes[c] = i
-		}
-	}
-
-	if timeIndex == -1 {
-		return nil, errors.New("error finding time index in result")
-	}
-
-	points := make([]models.Point, 0, len(row.Values))
-	for _, v := range row.Values {
-		vals := make(map[string]interface{})
-		for fieldName, fieldIndex := range fieldIndexes {
-			vals[fieldName] = v[fieldIndex]
-		}
-
-		p := models.NewPoint(measurementName, row.Tags, vals, v[timeIndex].(time.Time))
-
-		points = append(points, p)
-	}
-
-	return points, nil
-}
+	// There is only one statement, so we will only ever receive one result
+	res, ok := <-ch
+	if !ok {
+		panic("result channel was closed")
+	}
+	if res.Err != nil {
+		return res.Err
+	}

 	return nil
 }

 // ContinuousQuery is a local wrapper / helper around continuous queries.
 type ContinuousQuery struct {

@@ -437,16 +341,8 @@ type ContinuousQuery struct {
 	q  *influxql.SelectStatement
 }

-func (cq *ContinuousQuery) intoDB() string {
-	if cq.q.Target.Measurement.Database != "" {
-		return cq.q.Target.Measurement.Database
-	}
-	return cq.Database
-}
-
 func (cq *ContinuousQuery) intoRP() string      { return cq.q.Target.Measurement.RetentionPolicy }
 func (cq *ContinuousQuery) setIntoRP(rp string) { cq.q.Target.Measurement.RetentionPolicy = rp }
-func (cq *ContinuousQuery) intoMeasurement() string { return cq.q.Target.Measurement.Name }

 // NewContinuousQuery returns a ContinuousQuery object with a parsed influxql.CreateContinuousQueryStatement
 func NewContinuousQuery(database string, cqi *meta.ContinuousQueryInfo) (*ContinuousQuery, error) {
@@ -5,7 +5,6 @@ import (
 	"fmt"
 	"io/ioutil"
 	"log"
-	"strings"
 	"sync"
 	"testing"
 	"time"

@@ -38,95 +37,6 @@ func TestOpenAndClose(t *testing.T) {
 	}
 }

-// Test ExecuteContinuousQuery.
-func TestExecuteContinuousQuery(t *testing.T) {
-	s := NewTestService(t)
-	dbis, _ := s.MetaStore.Databases()
-	dbi := dbis[0]
-	cqi := dbi.ContinuousQueries[0]
-
-	pointCnt := 100
-	qe := s.QueryExecutor.(*QueryExecutor)
-	qe.Results = []*influxql.Result{genResult(1, pointCnt)}
-
-	pw := s.PointsWriter.(*PointsWriter)
-	pw.WritePointsFn = func(p *cluster.WritePointsRequest) error {
-		if len(p.Points) != pointCnt {
-			return fmt.Errorf("exp = %d, got = %d", pointCnt, len(p.Points))
-		}
-		return nil
-	}
-
-	err := s.ExecuteContinuousQuery(&dbi, &cqi, time.Now())
-	if err != nil {
-		t.Error(err)
-	}
-}
-
-// Test ExecuteContinuousQuery when INTO measurements are taken from the FROM clause.
-func TestExecuteContinuousQuery_ReferenceSource(t *testing.T) {
-	s := NewTestService(t)
-	dbis, _ := s.MetaStore.Databases()
-	dbi := dbis[2]
-	cqi := dbi.ContinuousQueries[0]
-
-	rowCnt := 2
-	pointCnt := 1
-	qe := s.QueryExecutor.(*QueryExecutor)
-	qe.Results = []*influxql.Result{genResult(rowCnt, pointCnt)}
-
-	pw := s.PointsWriter.(*PointsWriter)
-	pw.WritePointsFn = func(p *cluster.WritePointsRequest) error {
-		if len(p.Points) != pointCnt*rowCnt {
-			return fmt.Errorf("exp = %d, got = %d", pointCnt, len(p.Points))
-		}
-
-		exp := "cpu,host=server01 value=0"
-		got := p.Points[0].String()
-		if !strings.Contains(got, exp) {
-			return fmt.Errorf("\n\tExpected ':MEASUREMENT' to be expanded to the measurement name(s) in the FROM regexp.\n\tqry = %s\n\texp = %s\n\tgot = %s\n", cqi.Query, got, exp)
-		}
-
-		exp = "cpu2,host=server01 value=0"
-		got = p.Points[1].String()
-		if !strings.Contains(got, exp) {
-			return fmt.Errorf("\n\tExpected ':MEASUREMENT' to be expanded to the measurement name(s) in the FROM regexp.\n\tqry = %s\n\texp = %s\n\tgot = %s\n", cqi.Query, got, exp)
-		}
-
-		return nil
-	}
-
-	err := s.ExecuteContinuousQuery(&dbi, &cqi, time.Now())
-	if err != nil {
-		t.Error(err)
-	}
-}
-
-// Test the service happy path.
-func TestContinuousQueryService(t *testing.T) {
-	s := NewTestService(t)
-
-	pointCnt := 100
-	qe := s.QueryExecutor.(*QueryExecutor)
-	qe.Results = []*influxql.Result{genResult(1, pointCnt)}
-
-	pw := s.PointsWriter.(*PointsWriter)
-	ch := make(chan int, 10)
-	defer close(ch)
-	pw.WritePointsFn = func(p *cluster.WritePointsRequest) error {
-		ch <- len(p.Points)
-		return nil
-	}
-
-	s.Open()
-	if cnt, err := waitInt(ch, time.Second); err != nil {
-		t.Error(err)
-	} else if cnt != pointCnt {
-		t.Errorf("exp = %d, got = %d", pointCnt, cnt)
-	}
-	s.Close()
-}
-
 // Test Run method.
 func TestContinuousQueryService_Run(t *testing.T) {
 	s := NewTestService(t)

@@ -148,7 +58,9 @@ func TestContinuousQueryService_Run(t *testing.T) {
 		if callCnt >= expectCallCnt {
 			done <- struct{}{}
 		}
-		return nil, nil
+		dummych := make(chan *influxql.Result, 1)
+		dummych <- &influxql.Result{}
+		return dummych, nil
 	}

 	s.Open()

@@ -280,7 +192,6 @@ func NewTestService(t *testing.T) *Service {
 	ms := NewMetaStore(t)
 	s.MetaStore = ms
 	s.QueryExecutor = NewQueryExecutor(t)
-	s.PointsWriter = NewPointsWriter(t)
 	s.RunInterval = time.Millisecond

 	// Set Logger to write to dev/null so stdout isn't polluted.

@@ -411,7 +322,6 @@ type QueryExecutor struct {
 	ResultInterval      time.Duration
 	Err                 error
 	ErrAfterResult      int
-	StopRespondingAfter int
 	t                   *testing.T
 }

@@ -419,7 +329,6 @@ type QueryExecutor struct {
 func NewQueryExecutor(t *testing.T) *QueryExecutor {
 	return &QueryExecutor{
 		ErrAfterResult:      -1,
-		StopRespondingAfter: -1,
 		t:                   t,
 	}
 }

@@ -450,15 +359,15 @@ func (qe *QueryExecutor) ExecuteQuery(query *influxql.Query, database string, ch
 			ch <- &influxql.Result{Err: qe.Err}
 			close(ch)
 			return
-		} else if i == qe.StopRespondingAfter {
-			qe.t.Log("ExecuteQuery(): StopRespondingAfter")
-			return
 		}
 		ch <- r
 		n++
 		time.Sleep(qe.ResultInterval)
 	}
 	qe.t.Logf("ExecuteQuery(): all (%d) results sent", n)
+	if n == 0 {
+		ch <- &influxql.Result{Err: qe.Err}
+	}
 	close(ch)
 }()
Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/README.md (generated, vendored; 38 changed lines)

@@ -14,7 +14,7 @@ To extract tags from metrics, one or more templates must be configured to parse

 ## Templates

-Templates allow matching parts of a metric name to be used as tag names in the stored metric. They have a similar format to graphite metric names. The values in between the separators are used as the tag name. The location of the tag name that matches the same position as the graphite metric section is used as the value. If there is no value, the graphite portion is skipped.
+Templates allow matching parts of a metric name to be used as tag keys in the stored metric. They have a similar format to graphite metric names. The values in between the separators are used as the tag keys. The tag key that matches the same position as a graphite metric section takes that section as its value. If there is no value, the graphite portion is skipped.

 The special value _measurement_ is used to define the measurement name. It can have a trailing `*` to indicate that the remainder of the metric should be used. If a _measurement_ is not specified, the full metric name is used.

@@ -48,6 +48,39 @@ Additional tags can be added to a metric that don't exist on the received metric
 * Template: `.host.resource.measurement* region=us-west,zone=1a`
 * Output:  _measurement_ = `loadavg.10` _tags_ = `host=localhost resource=cpu region=us-west zone=1a`

+### Fields
+
+A field key can be specified by using the keyword _field_. By default, if no _field_ keyword is specified, the metric will be written to a field named _value_.
+
+When using the current default engine _BZ1_, it's recommended to use a single field per value for performance reasons.
+
+When using the _TSM1_ engine, it's possible to amend measurement metrics with additional fields, e.g.:
+
+Input:
+```
+sensu.metric.net.server0.eth0.rx_packets 461295119435 1444234982
+sensu.metric.net.server0.eth0.tx_bytes 1093086493388480 1444234982
+sensu.metric.net.server0.eth0.rx_bytes 1015633926034834 1444234982
+sensu.metric.net.server0.eth0.tx_errors 0 1444234982
+sensu.metric.net.server0.eth0.rx_errors 0 1444234982
+sensu.metric.net.server0.eth0.tx_dropped 0 1444234982
+sensu.metric.net.server0.eth0.rx_dropped 0 1444234982
+```
+
+With template:
+```
+sensu.metric.* ..measurement.host.interface.field
+```
+
+Becomes database entry:
+```
+> select * from net
+name: net
+---------
+time                 host     interface  rx_bytes               rx_dropped  rx_errors  rx_packets         tx_bytes              tx_dropped  tx_errors
+1444234982000000000  server0  eth0       1.015633926034834e+15  0           0          4.61295119435e+11  1.09308649338848e+15  0           0
+```
+
 ## Multiple Templates

 One template may not match all metrics. For example, using multiple plugins with diamond will produce metrics in different formats. If you need to use multiple templates, you'll need to define a prefix filter that must match before the template can be applied.

@@ -125,6 +158,9 @@ If you need to add the same set of tags to all metrics, you can define them glob
     # filter + template + extra tag
     "stats.* .host.measurement* region=us-west,agent=sensu",

+    # filter + template with field key
+    "stats.* .host.measurement.field",
+
     # default template. Ignore the first graphite component "servers"
     ".measurement*",
 ]
31
Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/parser.go
generated
vendored
31
Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/parser.go
generated
vendored
|
@ -100,7 +100,10 @@ func (p *Parser) Parse(line string) (models.Point, error) {
|
||||||
|
|
||||||
// decode the name and tags
|
// decode the name and tags
|
||||||
template := p.matcher.Match(fields[0])
|
template := p.matcher.Match(fields[0])
|
||||||
measurement, tags := template.Apply(fields[0])
|
measurement, tags, field, err := template.Apply(fields[0])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
// Could not extract measurement, use the raw value
|
// Could not extract measurement, use the raw value
|
||||||
if measurement == "" {
|
if measurement == "" {
|
||||||
|
@ -113,7 +116,12 @@ func (p *Parser) Parse(line string) (models.Point, error) {
|
||||||
return nil, fmt.Errorf(`field "%s" value: %s`, fields[0], err)
|
return nil, fmt.Errorf(`field "%s" value: %s`, fields[0], err)
|
||||||
}
|
}
|
||||||
|
|
||||||
fieldValues := map[string]interface{}{"value": v}
|
fieldValues := map[string]interface{}{}
|
||||||
|
if field != "" {
|
||||||
|
fieldValues[field] = v
|
||||||
|
} else {
|
||||||
|
fieldValues["value"] = v
|
||||||
|
}
|
||||||
|
|
||||||
// If no 3rd field, use now as timestamp
|
// If no 3rd field, use now as timestamp
|
||||||
timestamp := time.Now().UTC()
|
timestamp := time.Now().UTC()
|
||||||
|
@@ -149,22 +157,22 @@ func (p *Parser) Parse(line string) (models.Point, error) {

 // Apply extracts the template fields form the given line and returns the
 // measurement name and tags
-func (p *Parser) ApplyTemplate(line string) (string, map[string]string) {
+func (p *Parser) ApplyTemplate(line string) (string, map[string]string, string, error) {
 	// Break line into fields (name, value, timestamp), only name is used
 	fields := strings.Fields(line)
 	if len(fields) == 0 {
-		return "", make(map[string]string)
+		return "", make(map[string]string), "", nil
 	}
 	// decode the name and tags
 	template := p.matcher.Match(fields[0])
-	name, tags := template.Apply(fields[0])
+	name, tags, field, err := template.Apply(fields[0])
 	// Set the default tags on the point if they are not already set
 	for k, v := range p.tags {
 		if _, ok := tags[k]; !ok {
 			tags[k] = v
 		}
 	}
-	return name, tags
+	return name, tags, field, err
 }

 // template represents a pattern and tags to map a graphite metric string to a influxdb Point
@@ -198,11 +206,12 @@ func NewTemplate(pattern string, defaultTags models.Tags, separator string) (*te

 // Apply extracts the template fields form the given line and returns the measurement
 // name and tags
-func (t *template) Apply(line string) (string, map[string]string) {
+func (t *template) Apply(line string) (string, map[string]string, string, error) {
 	fields := strings.Split(line, ".")
 	var (
 		measurement []string
 		tags        = make(map[string]string)
+		field       string
 	)

 	// Set any default tags
@@ -217,6 +226,12 @@ func (t *template) Apply(line string) (string, map[string]string) {

 		if tag == "measurement" {
 			measurement = append(measurement, fields[i])
+		} else if tag == "field" {
+			if len(field) != 0 {
+				return "", nil, "", fmt.Errorf("'field' can only be used once in each template: %q", line)
+			} else {
+				field = fields[i]
+			}
 		} else if tag == "measurement*" {
 			measurement = append(measurement, fields[i:]...)
 			break
@@ -225,7 +240,7 @@ func (t *template) Apply(line string) (string, map[string]string) {
 		}
 	}

-	return strings.Join(measurement, t.separator), tags
+	return strings.Join(measurement, t.separator), tags, field, nil
 }

 // matcher determines which template should be applied to a given metric
Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/parser_test.go (50 changes, generated, vendored)
@@ -105,7 +105,7 @@ func TestTemplateApply(t *testing.T) {
 			continue
 		}

-		measurement, tags := tmpl.Apply(test.input)
+		measurement, tags, _, _ := tmpl.Apply(test.input)
 		if measurement != test.measurement {
 			t.Fatalf("name parse failer. expected %v, got %v", test.measurement, measurement)
 		}
@@ -558,7 +558,7 @@ func TestApplyTemplate(t *testing.T) {
 		t.Fatalf("unexpected error creating parser, got %v", err)
 	}

-	measurement, _ := p.ApplyTemplate("current.users")
+	measurement, _, _, _ := p.ApplyTemplate("current.users")
 	if measurement != "current_users" {
 		t.Errorf("Parser.ApplyTemplate unexpected result. got %s, exp %s",
 			measurement, "current_users")
@@ -576,7 +576,7 @@ func TestApplyTemplateNoMatch(t *testing.T) {
 		t.Fatalf("unexpected error creating parser, got %v", err)
 	}

-	measurement, _ := p.ApplyTemplate("current.users")
+	measurement, _, _, _ := p.ApplyTemplate("current.users")
 	if measurement != "current.users" {
 		t.Errorf("Parser.ApplyTemplate unexpected result. got %s, exp %s",
 			measurement, "current.users")
@@ -597,7 +597,7 @@ func TestApplyTemplateSpecific(t *testing.T) {
 		t.Fatalf("unexpected error creating parser, got %v", err)
 	}

-	measurement, tags := p.ApplyTemplate("current.users.facebook")
+	measurement, tags, _, _ := p.ApplyTemplate("current.users.facebook")
 	if measurement != "current_users" {
 		t.Errorf("Parser.ApplyTemplate unexpected result. got %s, exp %s",
 			measurement, "current_users")
@@ -621,7 +621,7 @@ func TestApplyTemplateTags(t *testing.T) {
 		t.Fatalf("unexpected error creating parser, got %v", err)
 	}

-	measurement, tags := p.ApplyTemplate("current.users")
+	measurement, tags, _, _ := p.ApplyTemplate("current.users")
 	if measurement != "current_users" {
 		t.Errorf("Parser.ApplyTemplate unexpected result. got %s, exp %s",
 			measurement, "current_users")
@@ -635,3 +635,43 @@ func TestApplyTemplateTags(t *testing.T) {
 		t.Errorf("Expected region='us-west' tag, got region='%s'", region)
 	}
 }
+
+func TestApplyTemplateField(t *testing.T) {
+	o := graphite.Options{
+		Separator: "_",
+		Templates: []string{"current.* measurement.measurement.field"},
+	}
+	p, err := graphite.NewParserWithOptions(o)
+	if err != nil {
+		t.Fatalf("unexpected error creating parser, got %v", err)
+	}
+
+	measurement, _, field, err := p.ApplyTemplate("current.users.logged_in")
+
+	if measurement != "current_users" {
+		t.Errorf("Parser.ApplyTemplate unexpected result. got %s, exp %s",
+			measurement, "current_users")
+	}
+
+	if field != "logged_in" {
+		t.Errorf("Parser.ApplyTemplate unexpected result. got %s, exp %s",
+			field, "logged_in")
+	}
+}
+
+func TestApplyTemplateFieldError(t *testing.T) {
+	o := graphite.Options{
+		Separator: "_",
+		Templates: []string{"current.* measurement.field.field"},
+	}
+	p, err := graphite.NewParserWithOptions(o)
+	if err != nil {
+		t.Fatalf("unexpected error creating parser, got %v", err)
+	}
+
+	_, _, _, err = p.ApplyTemplate("current.users.logged_in")
+	if err == nil {
+		t.Errorf("Parser.ApplyTemplate unexpected result. got %s, exp %s", err,
+			"'field' can only be used once in each template: current.users.logged_in")
+	}
+}
Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/processor.go

@@ -37,7 +37,9 @@ type Processor struct {
 	retryRateLimit int64

 	queues    map[uint64]*queue
+	meta      metaStore
 	writer    shardWriter
+	metastore metaStore
 	Logger    *log.Logger

 	// Shard-level and node-level HH stats.
@@ -50,11 +52,12 @@ type ProcessorOptions struct {
 	RetryRateLimit int64
 }

-func NewProcessor(dir string, writer shardWriter, options ProcessorOptions) (*Processor, error) {
+func NewProcessor(dir string, writer shardWriter, metastore metaStore, options ProcessorOptions) (*Processor, error) {
 	p := &Processor{
 		dir:           dir,
 		queues:        map[uint64]*queue{},
 		writer:        writer,
+		metastore:     metastore,
 		Logger:        log.New(os.Stderr, "[handoff] ", log.LstdFlags),
 		shardStatMaps: make(map[uint64]*expvar.Map),
 		nodeStatMaps:  make(map[uint64]*expvar.Map),
@@ -164,8 +167,13 @@ func (p *Processor) Process() error {
 	p.mu.RLock()
 	defer p.mu.RUnlock()

-	res := make(chan error, len(p.queues))
-	for nodeID, q := range p.queues {
+	activeQueues, err := p.activeQueues()
+	if err != nil {
+		return err
+	}
+
+	res := make(chan error, len(activeQueues))
+	for nodeID, q := range activeQueues {
 		go func(nodeID uint64, q *queue) {

 			// Log how many writes we successfully sent at the end
@@ -234,7 +242,7 @@ func (p *Processor) Process() error {
 		}(nodeID, q)
 	}

-	for range p.queues {
+	for range activeQueues {
 		err := <-res
 		if err != nil {
 			return err
@@ -273,6 +281,20 @@ func (p *Processor) updateShardStats(shardID uint64, stat string, inc int64) {
 	m.Add(stat, inc)
 }

+func (p *Processor) activeQueues() (map[uint64]*queue, error) {
+	queues := make(map[uint64]*queue)
+	for id, q := range p.queues {
+		ni, err := p.metastore.Node(id)
+		if err != nil {
+			return nil, err
+		}
+		if ni != nil {
+			queues[id] = q
+		}
+	}
+	return queues, nil
+}
+
 func (p *Processor) PurgeOlderThan(when time.Duration) error {
 	p.mu.Lock()
 	defer p.mu.Unlock()
@@ -284,3 +306,36 @@ func (p *Processor) PurgeOlderThan(when time.Duration) error {
 	}
 	return nil
 }
+
+func (p *Processor) PurgeInactiveOlderThan(when time.Duration) error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	for nodeID, queue := range p.queues {
+		// Only delete queues for inactive nodes.
+		ni, err := p.metastore.Node(nodeID)
+		if err != nil {
+			return err
+		}
+		if ni != nil {
+			continue
+		}
+
+		last, err := queue.LastModified()
+		if err != nil {
+			return err
+		}
+		if last.Before(time.Now().Add(-when)) {
+			// Close and remove the queue.
+			if err := queue.Close(); err != nil {
+				return err
+			}
+			if err := queue.Remove(); err != nil {
+				return err
+			}
+
+			delete(p.queues, nodeID)
+		}
+	}
+	return nil
+}
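The pruning above leans on the small `metaStore` contract defined in the hh service.go hunk further down: `Node` returning a nil `*meta.NodeInfo` with a nil error marks a node as dropped from the cluster. A minimal sketch of a conforming implementation; the in-memory map is purely illustrative:

```
package main

import (
	"fmt"

	"github.com/influxdb/influxdb/meta"
)

// memMetaStore is a hypothetical metaStore: Node returns nil (and no
// error) for any node that is no longer part of the cluster.
type memMetaStore struct {
	nodes map[uint64]*meta.NodeInfo
}

func (m *memMetaStore) Node(id uint64) (*meta.NodeInfo, error) {
	return m.nodes[id], nil // a missing key yields nil, i.e. "inactive"
}

func main() {
	ms := &memMetaStore{nodes: map[uint64]*meta.NodeInfo{200: {}}}
	for _, id := range []uint64{200, 300} {
		ni, _ := ms.Node(id)
		fmt.Printf("node %d active: %v\n", id, ni != nil)
	}
}
```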
Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/processor_test.go (77 changes, generated, vendored)
@@ -5,6 +5,7 @@ import (
 	"testing"
 	"time"

+	"github.com/influxdb/influxdb/meta"
 	"github.com/influxdb/influxdb/models"
 )
@@ -16,6 +17,14 @@ func (f *fakeShardWriter) WriteShard(shardID, nodeID uint64, points []models.Poi
 	return f.ShardWriteFn(shardID, nodeID, points)
 }

+type fakeMetaStore struct {
+	NodeFn func(nodeID uint64) (*meta.NodeInfo, error)
+}
+
+func (f *fakeMetaStore) Node(nodeID uint64) (*meta.NodeInfo, error) {
+	return f.NodeFn(nodeID)
+}
+
 func TestProcessorProcess(t *testing.T) {
 	dir, err := ioutil.TempDir("", "processor_test")
 	if err != nil {
@@ -23,7 +32,7 @@ func TestProcessorProcess(t *testing.T) {
 	}

 	// expected data to be queue and sent to the shardWriter
-	var expShardID, expNodeID, count = uint64(100), uint64(200), 0
+	var expShardID, activeNodeID, inactiveNodeID, count = uint64(100), uint64(200), uint64(300), 0
 	pt := models.NewPoint("cpu", models.Tags{"foo": "bar"}, models.Fields{"value": 1.0}, time.Unix(0, 0))

 	sh := &fakeShardWriter{
@@ -32,8 +41,8 @@ func TestProcessorProcess(t *testing.T) {
 			if shardID != expShardID {
 				t.Errorf("Process() shardID mismatch: got %v, exp %v", shardID, expShardID)
 			}
-			if nodeID != expNodeID {
-				t.Errorf("Process() nodeID mismatch: got %v, exp %v", nodeID, expNodeID)
+			if nodeID != activeNodeID {
+				t.Errorf("Process() nodeID mismatch: got %v, exp %v", nodeID, activeNodeID)
 			}

 			if exp := 1; len(points) != exp {
@@ -47,14 +56,27 @@ func TestProcessorProcess(t *testing.T) {
 			return nil
 		},
 	}
+	metastore := &fakeMetaStore{
+		NodeFn: func(nodeID uint64) (*meta.NodeInfo, error) {
+			if nodeID == activeNodeID {
+				return &meta.NodeInfo{}, nil
+			}
+			return nil, nil
+		},
+	}

-	p, err := NewProcessor(dir, sh, ProcessorOptions{MaxSize: 1024})
+	p, err := NewProcessor(dir, sh, metastore, ProcessorOptions{MaxSize: 1024})
 	if err != nil {
 		t.Fatalf("Process() failed to create processor: %v", err)
 	}

-	// This should queue the writes
-	if err := p.WriteShard(expShardID, expNodeID, []models.Point{pt}); err != nil {
+	// This should queue a write for the active node.
+	if err := p.WriteShard(expShardID, activeNodeID, []models.Point{pt}); err != nil {
+		t.Fatalf("Process() failed to write points: %v", err)
+	}
+
+	// This should queue a write for the inactive node.
+	if err := p.WriteShard(expShardID, inactiveNodeID, []models.Point{pt}); err != nil {
 		t.Fatalf("Process() failed to write points: %v", err)
 	}
@@ -67,7 +89,7 @@ func TestProcessorProcess(t *testing.T) {
 		t.Fatalf("Process() write count mismatch: got %v, exp %v", count, exp)
 	}

-	// Queue should be empty so no writes should be send again
+	// All active nodes should have been handled so no writes should be sent again
 	if err := p.Process(); err != nil {
 		t.Fatalf("Process() failed to write points: %v", err)
 	}
@@ -77,4 +99,45 @@ func TestProcessorProcess(t *testing.T) {
 		t.Fatalf("Process() write count mismatch: got %v, exp %v", count, exp)
 	}
+
+	// Make the inactive node active.
+	sh.ShardWriteFn = func(shardID, nodeID uint64, points []models.Point) error {
+		count += 1
+		if shardID != expShardID {
+			t.Errorf("Process() shardID mismatch: got %v, exp %v", shardID, expShardID)
+		}
+		if nodeID != inactiveNodeID {
+			t.Errorf("Process() nodeID mismatch: got %v, exp %v", nodeID, activeNodeID)
+		}
+
+		if exp := 1; len(points) != exp {
+			t.Fatalf("Process() points mismatch: got %v, exp %v", len(points), exp)
+		}
+
+		if points[0].String() != pt.String() {
+			t.Fatalf("Process() points mismatch:\n got %v\n exp %v", points[0].String(), pt.String())
+		}
+
+		return nil
+	}
+	metastore.NodeFn = func(nodeID uint64) (*meta.NodeInfo, error) {
+		return &meta.NodeInfo{}, nil
+	}
+
+	// This should send the final write to the shard writer
+	if err := p.Process(); err != nil {
+		t.Fatalf("Process() failed to write points: %v", err)
+	}
+
+	if exp := 2; count != exp {
+		t.Fatalf("Process() write count mismatch: got %v, exp %v", count, exp)
+	}
+
+	// All queues should have been handled, so no more writes should result.
+	if err := p.Process(); err != nil {
+		t.Fatalf("Process() failed to write points: %v", err)
+	}
+
+	if exp := 2; count != exp {
+		t.Fatalf("Process() write count mismatch: got %v, exp %v", count, exp)
+	}
 }
Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/queue.go

@@ -134,6 +134,19 @@ func (l *queue) Close() error {
 	return nil
 }

+// Remove removes all underlying file-based resources for the queue.
+// It is an error to call this on an open queue.
+func (l *queue) Remove() error {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
+	if l.head != nil || l.tail != nil || l.segments != nil {
+		return fmt.Errorf("queue is open")
+	}
+
+	return os.RemoveAll(l.dir)
+}
+
 // SetMaxSegmentSize updates the max segment size for new and existing
 // segments.
 func (l *queue) SetMaxSegmentSize(size int64) error {
@@ -160,9 +173,8 @@ func (l *queue) PurgeOlderThan(when time.Time) error {
 	l.mu.Lock()
 	defer l.mu.Unlock()

-	// Add a new empty segment so old ones can be reclaimed
-	if _, err := l.addSegment(); err != nil {
-		return err
+	if len(l.segments) == 0 {
+		return nil
 	}

 	cutoff := when.Truncate(time.Second)
@@ -175,12 +187,33 @@ func (l *queue) PurgeOlderThan(when time.Time) error {
 		if mod.After(cutoff) || mod.Equal(cutoff) {
 			return nil
 		}

+		// If this is the last segment, first append a new one allowing
+		// trimming to proceed.
+		if len(l.segments) == 1 {
+			_, err := l.addSegment()
+			if err != nil {
+				return err
+			}
+		}
+
 		if err := l.trimHead(); err != nil {
 			return err
 		}
 	}
 }

+// LastModified returns the last time the queue was modified.
+func (l *queue) LastModified() (time.Time, error) {
+	l.mu.RLock()
+	defer l.mu.RUnlock()
+
+	if l.tail != nil {
+		return l.tail.lastModified()
+	}
+	return time.Time{}, nil
+}
+
 // diskUsage returns the total size on disk used by the queue
 func (l *queue) diskUsage() int64 {
 	var size int64
Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/service.go

@@ -11,6 +11,7 @@ import (
 	"time"

 	"github.com/influxdb/influxdb"
+	"github.com/influxdb/influxdb/meta"
 	"github.com/influxdb/influxdb/models"
 )
@@ -38,6 +39,7 @@ type Service struct {
 		WriteShard(shardID, ownerID uint64, points []models.Point) error
 		Process() error
 		PurgeOlderThan(when time.Duration) error
+		PurgeInactiveOlderThan(when time.Duration) error
 	}
 }
@@ -45,8 +47,12 @@ type shardWriter interface {
 	WriteShard(shardID, ownerID uint64, points []models.Point) error
 }

+type metaStore interface {
+	Node(id uint64) (ni *meta.NodeInfo, err error)
+}
+
 // NewService returns a new instance of Service.
-func NewService(c Config, w shardWriter) *Service {
+func NewService(c Config, w shardWriter, m metaStore) *Service {
 	key := strings.Join([]string{"hh", c.Dir}, ":")
 	tags := map[string]string{"path": c.Dir}

@@ -55,7 +61,7 @@ func NewService(c Config, w shardWriter) *Service {
 		statMap: influxdb.NewStatistics(key, "hh", tags),
 		Logger:  log.New(os.Stderr, "[handoff] ", log.LstdFlags),
 	}
-	processor, err := NewProcessor(c.Dir, w, ProcessorOptions{
+	processor, err := NewProcessor(c.Dir, w, m, ProcessorOptions{
 		MaxSize:        c.MaxSize,
 		RetryRateLimit: c.RetryRateLimit,
 	})
@@ -83,9 +89,10 @@ func (s *Service) Open() error {

 	s.Logger.Printf("Using data dir: %v", s.cfg.Dir)

-	s.wg.Add(2)
+	s.wg.Add(3)
 	go s.retryWrites()
 	go s.expireWrites()
+	go s.deleteInactiveQueues()

 	return nil
 }
@@ -165,8 +172,19 @@ func (s *Service) expireWrites() {
 	}
 }

-// purgeWrites will cause the handoff queues to remove writes that are no longer
-// valid. e.g. queued writes for a node that has been removed
-func (s *Service) purgeWrites() {
-	panic("not implemented")
+// deleteInactiveQueues will cause the service to remove queues for inactive nodes.
+func (s *Service) deleteInactiveQueues() {
+	defer s.wg.Done()
+	ticker := time.NewTicker(time.Hour)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-s.closing:
+			return
+		case <-ticker.C:
+			if err := s.HintedHandoff.PurgeInactiveOlderThan(time.Duration(s.cfg.MaxAge)); err != nil {
+				s.Logger.Printf("delete queues failed: %v", err)
+			}
+		}
+	}
 }
Godeps/_workspace/src/github.com/influxdb/influxdb/services/httpd/handler.go

@@ -189,7 +189,7 @@ func (h *Handler) serveProcessContinuousQueries(w http.ResponseWriter, r *http.R
 	// Get the name of the CQ to run (blank means run all).
 	name := q.Get("name")
 	// Get the time for which the CQ should be evaluated.
-	var t time.Time
+	t := time.Now()
 	var err error
 	s := q.Get("time")
 	if s != "" {
Godeps/_workspace/src/github.com/influxdb/influxdb/services/httpd/handler_test.go (18 changes, generated, vendored)
@@ -157,6 +157,24 @@ func TestHandler_Query(t *testing.T) {
 	}
 }

+// Ensure the handler returns results from a query (including nil results).
+func TestHandler_QueryRegex(t *testing.T) {
+	h := NewHandler(false)
+	h.QueryExecutor.ExecuteQueryFn = func(q *influxql.Query, db string, chunkSize int) (<-chan *influxql.Result, error) {
+		if q.String() != `SELECT * FROM test WHERE url =~ /http\:\/\/www.akamai\.com/` {
+			t.Fatalf("unexpected query: %s", q.String())
+		} else if db != `test` {
+			t.Fatalf("unexpected db: %s", db)
+		}
+		return NewResultChan(
+			nil,
+		), nil
+	}
+
+	w := httptest.NewRecorder()
+	h.ServeHTTP(w, MustNewRequest("GET", "/query?db=test&q=SELECT%20%2A%20FROM%20test%20WHERE%20url%20%3D~%20%2Fhttp%5C%3A%5C%2F%5C%2Fwww.akamai%5C.com%2F", nil))
+}
+
 // Ensure the handler merges results from the same statement.
 func TestHandler_Query_MergeResults(t *testing.T) {
 	h := NewHandler(false)
Godeps/_workspace/src/github.com/influxdb/influxdb/services/opentsdb/service.go (11 changes, generated, vendored)
@@ -49,6 +49,7 @@ type Service struct {
 	ln     net.Listener  // main listener
 	httpln *chanListener // http channel-based listener

+	mu   sync.Mutex
 	wg   sync.WaitGroup
 	done chan struct{}
 	err  chan error
@@ -104,6 +105,9 @@ func NewService(c Config) (*Service, error) {

 // Open starts the service
 func (s *Service) Open() error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
 	s.Logger.Println("Starting OpenTSDB service")

 	// Configure expvar monitoring. It's OK to do this even if the service fails to open and
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close closes the underlying listener.
|
// Close closes the openTSDB service
|
||||||
func (s *Service) Close() error {
|
func (s *Service) Close() error {
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
|
||||||
if s.ln != nil {
|
if s.ln != nil {
|
||||||
return s.ln.Close()
|
return s.ln.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if s.batcher != nil {
|
||||||
s.batcher.Stop()
|
s.batcher.Stop()
|
||||||
|
}
|
||||||
close(s.done)
|
close(s.done)
|
||||||
s.wg.Wait()
|
s.wg.Wait()
|
||||||
return nil
|
return nil
|
||||||
|
|
Godeps/_workspace/src/github.com/influxdb/influxdb/services/subscriber/config.go (10 lines, new file, generated, vendored)
@@ -0,0 +1,10 @@
package subscriber

type Config struct {
	// Whether to enable to Subscriber service
	Enabled bool `toml:"enabled"`
}

func NewConfig() Config {
	return Config{Enabled: true}
}
Godeps/_workspace/src/github.com/influxdb/influxdb/services/subscriber/config_test.go (23 lines, new file, generated, vendored)
@@ -0,0 +1,23 @@
package subscriber_test

import (
	"testing"

	"github.com/BurntSushi/toml"
	"github.com/influxdb/influxdb/services/subscriber"
)

func TestConfig_Parse(t *testing.T) {
	// Parse configuration.
	var c subscriber.Config
	if _, err := toml.Decode(`
enabled = false
`, &c); err != nil {
		t.Fatal(err)
	}

	// Validate configuration.
	if c.Enabled != false {
		t.Fatalf("unexpected enabled state: %v", c.Enabled)
	}
}
Godeps/_workspace/src/github.com/influxdb/influxdb/services/subscriber/service.go (261 lines, new file, generated, vendored)
@@ -0,0 +1,261 @@
package subscriber

import (
	"expvar"
	"fmt"
	"log"
	"net/url"
	"os"
	"strings"
	"sync"

	"github.com/influxdb/influxdb"
	"github.com/influxdb/influxdb/cluster"
	"github.com/influxdb/influxdb/meta"
)

// Statistics for the Subscriber service.
const (
	statPointsWritten = "points_written"
	statWriteFailures = "write_failures"
)

type PointsWriter interface {
	WritePoints(p *cluster.WritePointsRequest) error
}

// unique set that identifies a given subscription
type subEntry struct {
	db   string
	rp   string
	name string
}

// The Subscriber service manages forking the incoming data from InfluxDB
// to defined third party destinations.
// Subscriptions are defined per database and retention policy.
type Service struct {
	subs      map[subEntry]PointsWriter
	MetaStore interface {
		Databases() ([]meta.DatabaseInfo, error)
		WaitForDataChanged() error
	}
	NewPointsWriter func(u url.URL) (PointsWriter, error)
	Logger          *log.Logger
	statMap         *expvar.Map
	points          chan *cluster.WritePointsRequest
	wg              sync.WaitGroup
	closed          bool
	mu              sync.Mutex
}

func NewService(c Config) *Service {
	return &Service{
		subs:            make(map[subEntry]PointsWriter),
		NewPointsWriter: newPointsWriter,
		Logger:          log.New(os.Stderr, "[subscriber] ", log.LstdFlags),
		statMap:         influxdb.NewStatistics("subscriber", "subscriber", nil),
		points:          make(chan *cluster.WritePointsRequest),
	}
}

func (s *Service) Open() error {
	s.mu.Lock()
	defer s.mu.Unlock()

	if s.MetaStore == nil {
		panic("no meta store")
	}

	s.closed = false

	// Perform initial update
	s.Update()

	s.wg.Add(1)
	go s.writePoints()
	// Do not wait for this goroutine since it block until a meta change occurs.
	go s.waitForMetaUpdates()

	s.Logger.Println("opened service")
	return nil
}

func (s *Service) Close() error {
	s.mu.Lock()
	defer s.mu.Unlock()
	close(s.points)
	s.closed = true
	s.wg.Wait()
	s.Logger.Println("closed service")
	return nil
}

func (s *Service) waitForMetaUpdates() {
	for {
		err := s.MetaStore.WaitForDataChanged()
		if err != nil {
			s.Logger.Printf("error while waiting for meta data changes, err: %v\n", err)
			return
		} else {
			//Check that we haven't been closed before performing update.
			s.mu.Lock()
			if !s.closed {
				s.mu.Unlock()
				break
			}
			s.mu.Unlock()
			s.Update()
		}
	}
}

// start new and stop deleted subscriptions.
func (s *Service) Update() error {
	s.Logger.Println("updating subscriptions")
	dbis, err := s.MetaStore.Databases()
	if err != nil {
		return err
	}
	allEntries := make(map[subEntry]bool, 0)
	// Add in new subscriptions
	for _, dbi := range dbis {
		for _, rpi := range dbi.RetentionPolicies {
			for _, si := range rpi.Subscriptions {
				se := subEntry{
					db:   dbi.Name,
					rp:   rpi.Name,
					name: si.Name,
				}
				allEntries[se] = true
				if _, ok := s.subs[se]; ok {
					continue
				}
				sub, err := s.createSubscription(se, si.Mode, si.Destinations)
				if err != nil {
					return err
				}
				s.subs[se] = sub
			}
		}
	}

	// Remove deleted subs
	for se := range s.subs {
		if !allEntries[se] {
			delete(s.subs, se)
		}
	}

	return nil
}

func (s *Service) createSubscription(se subEntry, mode string, destinations []string) (PointsWriter, error) {
	var bm BalanceMode
	switch mode {
	case "ALL":
		bm = ALL
	case "ANY":
		bm = ANY
	default:
		return nil, fmt.Errorf("unknown balance mode %q", mode)
	}
	writers := make([]PointsWriter, len(destinations))
	statMaps := make([]*expvar.Map, len(writers))
	for i, dest := range destinations {
		u, err := url.Parse(dest)
		if err != nil {
			return nil, err
		}
		w, err := s.NewPointsWriter(*u)
		if err != nil {
			return nil, err
		}
		writers[i] = w
		tags := map[string]string{
			"database":         se.db,
			"retention_policy": se.rp,
			"name":             se.name,
			"mode":             mode,
			"destination":      dest,
		}
		key := strings.Join([]string{"subscriber", se.db, se.rp, se.name, dest}, ":")
		statMaps[i] = influxdb.NewStatistics(key, "subscriber", tags)
	}
	return &balancewriter{
		bm:       bm,
		writers:  writers,
		statMaps: statMaps,
	}, nil
}

// Return channel into which write point requests can be sent.
func (s *Service) Points() chan<- *cluster.WritePointsRequest {
	return s.points
}

// read points off chan and write them
func (s *Service) writePoints() {
	defer s.wg.Done()
	for p := range s.points {
		for se, sub := range s.subs {
			if p.Database == se.db && p.RetentionPolicy == se.rp {
				err := sub.WritePoints(p)
				if err != nil {
					s.Logger.Println(err)
					s.statMap.Add(statWriteFailures, 1)
				}
			}
		}
		s.statMap.Add(statPointsWritten, int64(len(p.Points)))
	}
}

type BalanceMode int

const (
	ALL BalanceMode = iota
	ANY
)

// balances writes across PointsWriters according to BalanceMode
type balancewriter struct {
	bm       BalanceMode
	writers  []PointsWriter
	statMaps []*expvar.Map
	i        int
}

func (b *balancewriter) WritePoints(p *cluster.WritePointsRequest) error {
	var lastErr error
	for range b.writers {
		// round robin through destinations.
		i := b.i
		w := b.writers[i]
		b.i = (b.i + 1) % len(b.writers)

		// write points to destination.
		err := w.WritePoints(p)
		if err != nil {
			lastErr = err
			b.statMaps[i].Add(statWriteFailures, 1)
		} else {
			b.statMaps[i].Add(statPointsWritten, int64(len(p.Points)))
			if b.bm == ANY {
				break
			}
		}
	}
	return lastErr
}

// Creates a PointsWriter from the given URL
func newPointsWriter(u url.URL) (PointsWriter, error) {
	switch u.Scheme {
	case "udp":
		return NewUDP(u.Host), nil
	default:
		return nil, fmt.Errorf("unknown destination scheme %s", u.Scheme)
	}
}
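The test file that follows exercises this service through fakes; as a quicker orientation, here is a condensed usage sketch. The static meta store, database names, and destination address are illustrative, not part of the vendored code:

```
package main

import (
	"log"

	"github.com/influxdb/influxdb/cluster"
	"github.com/influxdb/influxdb/meta"
	"github.com/influxdb/influxdb/services/subscriber"
)

// staticMeta is a hypothetical MetaStore implementation serving a single
// subscription and never reporting meta changes.
type staticMeta struct{}

func (staticMeta) Databases() ([]meta.DatabaseInfo, error) {
	return []meta.DatabaseInfo{{
		Name: "db0",
		RetentionPolicies: []meta.RetentionPolicyInfo{{
			Name: "rp0",
			Subscriptions: []meta.SubscriptionInfo{
				{Name: "s0", Mode: "ALL", Destinations: []string{"udp://localhost:9093"}},
			},
		}},
	}}, nil
}

func (staticMeta) WaitForDataChanged() error {
	select {} // block forever; no meta changes in this sketch
}

func main() {
	s := subscriber.NewService(subscriber.NewConfig())
	s.MetaStore = staticMeta{}
	if err := s.Open(); err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	// Every request sent into Points() is forked to matching subscriptions;
	// the default writer sends line protocol over UDP.
	s.Points() <- &cluster.WritePointsRequest{
		Database:        "db0",
		RetentionPolicy: "rp0",
	}
}
```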
|
389
Godeps/_workspace/src/github.com/influxdb/influxdb/services/subscriber/service_test.go
generated
vendored
Normal file
389
Godeps/_workspace/src/github.com/influxdb/influxdb/services/subscriber/service_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,389 @@
package subscriber_test

import (
	"net/url"
	"testing"
	"time"

	"github.com/influxdb/influxdb/cluster"
	"github.com/influxdb/influxdb/meta"
	"github.com/influxdb/influxdb/services/subscriber"
)

type MetaStore struct {
	DatabasesFn          func() ([]meta.DatabaseInfo, error)
	WaitForDataChangedFn func() error
}

func (m MetaStore) Databases() ([]meta.DatabaseInfo, error) {
	return m.DatabasesFn()
}

func (m MetaStore) WaitForDataChanged() error {
	return m.WaitForDataChangedFn()
}

type Subscription struct {
	WritePointsFn func(*cluster.WritePointsRequest) error
}

func (s Subscription) WritePoints(p *cluster.WritePointsRequest) error {
	return s.WritePointsFn(p)
}

func TestService_IgnoreNonMatch(t *testing.T) {
	dataChanged := make(chan bool)
	ms := MetaStore{}
	ms.WaitForDataChangedFn = func() error {
		<-dataChanged
		return nil
	}
	ms.DatabasesFn = func() ([]meta.DatabaseInfo, error) {
		return []meta.DatabaseInfo{
			{
				Name: "db0",
				RetentionPolicies: []meta.RetentionPolicyInfo{
					{
						Name: "rp0",
						Subscriptions: []meta.SubscriptionInfo{
							{Name: "s0", Mode: "ANY", Destinations: []string{"udp://h0:9093", "udp://h1:9093"}},
						},
					},
				},
			},
		}, nil
	}

	prs := make(chan *cluster.WritePointsRequest, 2)
	urls := make(chan url.URL, 2)
	newPointsWriter := func(u url.URL) (subscriber.PointsWriter, error) {
		sub := Subscription{}
		sub.WritePointsFn = func(p *cluster.WritePointsRequest) error {
			prs <- p
			return nil
		}
		urls <- u
		return sub, nil
	}

	s := subscriber.NewService(subscriber.NewConfig())
	s.MetaStore = ms
	s.NewPointsWriter = newPointsWriter
	s.Open()
	defer s.Close()

	// Signal that data has changed
	dataChanged <- true

	for _, expURLStr := range []string{"udp://h0:9093", "udp://h1:9093"} {
		var u url.URL
		expURL, _ := url.Parse(expURLStr)
		select {
		case u = <-urls:
		case <-time.After(10 * time.Millisecond):
			t.Fatal("expected urls")
		}
		if expURL.String() != u.String() {
			t.Fatalf("unexpected url: got %s exp %s", u.String(), expURL.String())
		}
	}

	// Write points that don't match any subscription.
	s.Points() <- &cluster.WritePointsRequest{
		Database:        "db1",
		RetentionPolicy: "rp0",
	}
	s.Points() <- &cluster.WritePointsRequest{
		Database:        "db0",
		RetentionPolicy: "rp2",
	}

	// Shouldn't get any prs back
	select {
	case pr := <-prs:
		t.Fatalf("unexpected points request %v", pr)
	default:
	}
	close(dataChanged)
}

func TestService_ModeALL(t *testing.T) {
	dataChanged := make(chan bool)
	ms := MetaStore{}
	ms.WaitForDataChangedFn = func() error {
		<-dataChanged
		return nil
	}
	ms.DatabasesFn = func() ([]meta.DatabaseInfo, error) {
		return []meta.DatabaseInfo{
			{
				Name: "db0",
				RetentionPolicies: []meta.RetentionPolicyInfo{
					{
						Name: "rp0",
						Subscriptions: []meta.SubscriptionInfo{
							{Name: "s0", Mode: "ALL", Destinations: []string{"udp://h0:9093", "udp://h1:9093"}},
						},
					},
				},
			},
		}, nil
	}

	prs := make(chan *cluster.WritePointsRequest, 2)
	urls := make(chan url.URL, 2)
	newPointsWriter := func(u url.URL) (subscriber.PointsWriter, error) {
		sub := Subscription{}
		sub.WritePointsFn = func(p *cluster.WritePointsRequest) error {
			prs <- p
			return nil
		}
		urls <- u
		return sub, nil
	}

	s := subscriber.NewService(subscriber.NewConfig())
	s.MetaStore = ms
	s.NewPointsWriter = newPointsWriter
	s.Open()
	defer s.Close()

	// Signal that data has changed
	dataChanged <- true

	for _, expURLStr := range []string{"udp://h0:9093", "udp://h1:9093"} {
		var u url.URL
		expURL, _ := url.Parse(expURLStr)
		select {
		case u = <-urls:
		case <-time.After(10 * time.Millisecond):
			t.Fatal("expected urls")
		}
		if expURL.String() != u.String() {
			t.Fatalf("unexpected url: got %s exp %s", u.String(), expURL.String())
		}
	}

	// Write points that match subscription with mode ALL
	expPR := &cluster.WritePointsRequest{
		Database:        "db0",
		RetentionPolicy: "rp0",
	}
	s.Points() <- expPR

	// Should get pr back twice
	for i := 0; i < 2; i++ {
		var pr *cluster.WritePointsRequest
		select {
		case pr = <-prs:
		case <-time.After(10 * time.Millisecond):
			t.Fatalf("expected points request: got %d exp 2", i)
		}
		if pr != expPR {
			t.Errorf("unexpected points request: got %v, exp %v", pr, expPR)
		}
	}
	close(dataChanged)
}

func TestService_ModeANY(t *testing.T) {
	dataChanged := make(chan bool)
	ms := MetaStore{}
	ms.WaitForDataChangedFn = func() error {
		<-dataChanged
		return nil
	}
	ms.DatabasesFn = func() ([]meta.DatabaseInfo, error) {
		return []meta.DatabaseInfo{
			{
				Name: "db0",
				RetentionPolicies: []meta.RetentionPolicyInfo{
					{
						Name: "rp0",
						Subscriptions: []meta.SubscriptionInfo{
							{Name: "s0", Mode: "ANY", Destinations: []string{"udp://h0:9093", "udp://h1:9093"}},
						},
					},
				},
			},
		}, nil
	}

	prs := make(chan *cluster.WritePointsRequest, 2)
	urls := make(chan url.URL, 2)
	newPointsWriter := func(u url.URL) (subscriber.PointsWriter, error) {
		sub := Subscription{}
		sub.WritePointsFn = func(p *cluster.WritePointsRequest) error {
			prs <- p
			return nil
		}
		urls <- u
		return sub, nil
	}

	s := subscriber.NewService(subscriber.NewConfig())
	s.MetaStore = ms
	s.NewPointsWriter = newPointsWriter
	s.Open()
	defer s.Close()

	// Signal that data has changed
	dataChanged <- true

	for _, expURLStr := range []string{"udp://h0:9093", "udp://h1:9093"} {
		var u url.URL
		expURL, _ := url.Parse(expURLStr)
		select {
		case u = <-urls:
		case <-time.After(10 * time.Millisecond):
			t.Fatal("expected urls")
		}
		if expURL.String() != u.String() {
			t.Fatalf("unexpected url: got %s exp %s", u.String(), expURL.String())
		}
	}
	// Write points that match subscription with mode ANY
	expPR := &cluster.WritePointsRequest{
		Database:        "db0",
		RetentionPolicy: "rp0",
	}
	s.Points() <- expPR

	// Validate we get the pr back just once
	var pr *cluster.WritePointsRequest
	select {
	case pr = <-prs:
	case <-time.After(10 * time.Millisecond):
		t.Fatal("expected points request")
	}
	if pr != expPR {
		t.Errorf("unexpected points request: got %v, exp %v", pr, expPR)
	}

	// shouldn't get it a second time
	select {
	case pr = <-prs:
		t.Fatalf("unexpected points request %v", pr)
	default:
	}
	close(dataChanged)
}

func TestService_Multiple(t *testing.T) {
	dataChanged := make(chan bool)
	ms := MetaStore{}
	ms.WaitForDataChangedFn = func() error {
		<-dataChanged
		return nil
	}
	ms.DatabasesFn = func() ([]meta.DatabaseInfo, error) {
		return []meta.DatabaseInfo{
			{
				Name: "db0",
				RetentionPolicies: []meta.RetentionPolicyInfo{
					{
						Name: "rp0",
						Subscriptions: []meta.SubscriptionInfo{
							{Name: "s0", Mode: "ANY", Destinations: []string{"udp://h0:9093", "udp://h1:9093"}},
						},
					},
					{
						Name: "rp1",
						Subscriptions: []meta.SubscriptionInfo{
							{Name: "s1", Mode: "ALL", Destinations: []string{"udp://h2:9093", "udp://h3:9093"}},
						},
					},
				},
			},
		}, nil
	}

	prs := make(chan *cluster.WritePointsRequest, 4)
	urls := make(chan url.URL, 4)
	newPointsWriter := func(u url.URL) (subscriber.PointsWriter, error) {
		sub := Subscription{}
		sub.WritePointsFn = func(p *cluster.WritePointsRequest) error {
			prs <- p
			return nil
		}
		urls <- u
		return sub, nil
	}

	s := subscriber.NewService(subscriber.NewConfig())
	s.MetaStore = ms
	s.NewPointsWriter = newPointsWriter
	s.Open()
	defer s.Close()

	// Signal that data has changed
	dataChanged <- true

	for _, expURLStr := range []string{"udp://h0:9093", "udp://h1:9093", "udp://h2:9093", "udp://h3:9093"} {
		var u url.URL
		expURL, _ := url.Parse(expURLStr)
		select {
		case u = <-urls:
		case <-time.After(10 * time.Millisecond):
			t.Fatal("expected urls")
		}
		if expURL.String() != u.String() {
			t.Fatalf("unexpected url: got %s exp %s", u.String(), expURL.String())
		}
	}

	// Write points that don't match any subscription.
	s.Points() <- &cluster.WritePointsRequest{
		Database:        "db1",
		RetentionPolicy: "rp0",
	}
	s.Points() <- &cluster.WritePointsRequest{
		Database:        "db0",
		RetentionPolicy: "rp2",
	}

	// Write points that match subscription with mode ANY
	expPR := &cluster.WritePointsRequest{
		Database:        "db0",
		RetentionPolicy: "rp0",
	}
	s.Points() <- expPR

	// Validate we get the pr back just once
	var pr *cluster.WritePointsRequest
	select {
	case pr = <-prs:
	case <-time.After(10 * time.Millisecond):
		t.Fatal("expected points request")
	}
	if pr != expPR {
		t.Errorf("unexpected points request: got %v, exp %v", pr, expPR)
	}

	// shouldn't get it a second time
	select {
	case pr = <-prs:
		t.Fatalf("unexpected points request %v", pr)
	default:
	}

	// Write points that match subscription with mode ALL
	expPR = &cluster.WritePointsRequest{
		Database:        "db0",
		RetentionPolicy: "rp1",
	}
	s.Points() <- expPR

	// Should get pr back twice
	for i := 0; i < 2; i++ {
		select {
		case pr = <-prs:
		case <-time.After(10 * time.Millisecond):
			t.Fatalf("expected points request: got %d exp 2", i)
		}
		if pr != expPR {
			t.Errorf("unexpected points request: got %v, exp %v", pr, expPR)
		}
	}
	close(dataChanged)
}
Godeps/_workspace/src/github.com/influxdb/influxdb/services/subscriber/udp.go (40 lines, new file, generated, vendored)
@@ -0,0 +1,40 @@
package subscriber

import (
	"net"

	"github.com/influxdb/influxdb/cluster"
)

// Writes points over UDP using the line protocol
type UDP struct {
	addr string
}

func NewUDP(addr string) *UDP {
	return &UDP{addr: addr}
}

func (u *UDP) WritePoints(p *cluster.WritePointsRequest) (err error) {
	var addr *net.UDPAddr
	var con *net.UDPConn
	addr, err = net.ResolveUDPAddr("udp", u.addr)
	if err != nil {
		return
	}

	con, err = net.DialUDP("udp", nil, addr)
	if err != nil {
		return
	}
	defer con.Close()

	for _, p := range p.Points {
		_, err = con.Write([]byte(p.String()))
		if err != nil {
			return
		}

	}
	return
}
|
13
Godeps/_workspace/src/github.com/influxdb/influxdb/services/udp/README.md
generated
vendored
Normal file
13
Godeps/_workspace/src/github.com/influxdb/influxdb/services/udp/README.md
generated
vendored
Normal file
|
@@ -0,0 +1,13 @@
# Configuration

Each UDP input allows the binding address, target database, and target retention policy to be set. If the database does not exist, it will be created automatically when the input is initialized. If the retention policy is not configured, then the default retention policy for the database is used. However, if the retention policy is set, it must be explicitly created; the input will not create it automatically.

Each UDP input also performs internal batching of the points it receives, as batched writes to the database are more efficient. The default _batch size_ is 1000, the _pending batch_ factor is 5, and the _batch timeout_ is 1 second. This means the input will write batches of at most 1000 points, but if a batch has not reached 1000 points within 1 second of the first point being added, it will emit that batch regardless of size. The pending batch factor controls how many batches can be in memory at once, allowing the input to transmit a batch while still building others.

# Processing

The UDP input can receive up to 64KB per read, and splits the received data by newline. Each part is then interpreted as line-protocol encoded points and parsed accordingly.

# UDP is connectionless

Since UDP is a connectionless protocol, there is no way to signal to the data source that an error occurred, or that the data was even successfully indexed. This should be kept in mind when deciding if and when to use the UDP input. The built-in UDP statistics are useful for monitoring the UDP inputs.
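The config change in the hunk below wires the default database name into `WithDefaults`. A small sketch of how a caller might rely on it; the bind address value is illustrative:

```
package main

import (
	"fmt"

	"github.com/influxdb/influxdb/services/udp"
)

func main() {
	// Only the bind address is set; the database name and batch size
	// fall back to the defaults applied by WithDefaults.
	c := udp.Config{BindAddress: ":8089"}
	d := c.WithDefaults()
	fmt.Println(d.Database, d.BatchSize) // udp 1000
}
```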
Godeps/_workspace/src/github.com/influxdb/influxdb/services/udp/config.go

@@ -7,6 +7,9 @@ import (
 )

 const (
+	// DefaultDatabase is the default database for UDP traffic.
+	DefaultDatabase = "udp"
+
 	// DefaultBatchSize is the default UDP batch size.
 	DefaultBatchSize = 1000

@@ -32,6 +35,9 @@ type Config struct {
 // default values set.
 func (c *Config) WithDefaults() *Config {
 	d := *c
+	if d.Database == "" {
+		d.Database = DefaultDatabase
+	}
 	if d.BatchSize == 0 {
 		d.BatchSize = DefaultBatchSize
 	}
@ -12,6 +12,7 @@ import (
|
||||||
|
|
||||||
"github.com/influxdb/influxdb"
|
"github.com/influxdb/influxdb"
|
||||||
"github.com/influxdb/influxdb/cluster"
|
"github.com/influxdb/influxdb/cluster"
|
||||||
|
"github.com/influxdb/influxdb/meta"
|
||||||
"github.com/influxdb/influxdb/models"
|
"github.com/influxdb/influxdb/models"
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
"github.com/influxdb/influxdb/tsdb"
|
||||||
)
|
)
|
||||||
|
@@ -49,6 +50,10 @@ type Service struct {
 		WritePoints(p *cluster.WritePointsRequest) error
 	}
 
+	MetaStore interface {
+		CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error)
+	}
+
 	Logger  *log.Logger
 	statMap *expvar.Map
 }
@@ -77,6 +82,10 @@ func (s *Service) Open() (err error) {
 		return errors.New("database has to be specified in config")
 	}
 
+	if _, err := s.MetaStore.CreateDatabaseIfNotExists(s.config.Database); err != nil {
+		return errors.New("Failed to ensure target database exists")
+	}
+
 	s.addr, err = net.ResolveUDPAddr("udp", s.config.BindAddress)
 	if err != nil {
 		s.Logger.Printf("Failed to resolve UDP address %s: %s", s.config.BindAddress, err)
@@ -309,9 +309,11 @@ var TableHeader = React.createClass({
 
 var TableBody = React.createClass({
   render: function() {
+    if (this.props.data.values) {
       var tableRows = this.props.data.values.map(function (row) {
         return React.createElement(TableRow, {data: row});
       });
+    }
 
     return React.createElement("tbody", null, tableRows);
   }
File diff suppressed because one or more lines are too long
@@ -190,6 +190,9 @@ func (s *series) writeInterval(i int, start time.Time) time.Time {
 
 	if s.Jitter {
 		j = rand.Intn(int(tick))
+		if j%2 == 0 {
+			j = -2 * j
+		}
 	}
 
 	tick = tick*time.Duration(i) + time.Duration(j)
@@ -8,23 +8,20 @@ package tsm1
 import "encoding/binary"
 
 const (
-	// boolUncompressed is an uncompressed boolean format
+	// boolUncompressed is an uncompressed boolean format.
+	// Not yet implemented.
 	boolUncompressed = 0
 
 	// boolCompressedBitPacked is an bit packed format using 1 bit per boolean
 	boolCompressedBitPacked = 1
 )
 
+// BoolEncoder encodes a series of bools to an in-memory buffer.
 type BoolEncoder interface {
 	Write(b bool)
 	Bytes() ([]byte, error)
 }
 
-type BoolDecoder interface {
-	Next() bool
-	Read() bool
-	Error() error
-}
-
 type boolEncoder struct {
 	// The encoded bytes
 	bytes []byte
@@ -39,6 +36,7 @@ type boolEncoder struct {
 	n int
 }
 
+// NewBoolEncoder returns a new instance of BoolEncoder.
 func NewBoolEncoder() BoolEncoder {
 	return &boolEncoder{}
 }
@@ -57,16 +55,16 @@ func (e *boolEncoder) Write(b bool) {
 	}
 
 	// Increment the current bool count
-	e.i += 1
+	e.i++
 	// Increment the total bool count
-	e.n += 1
+	e.n++
 }
 
 func (e *boolEncoder) flush() {
 	// Pad remaining byte w/ 0s
 	for e.i < 8 {
 		e.b = e.b << 1
-		e.i += 1
+		e.i++
 	}
 
 	// If we have bits set, append them to the byte slice
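As a standalone illustration of the one-bit-per-boolean packing these hunks touch (this is a sketch of the idea behind boolCompressedBitPacked, not the package's own encoder):

	package main

	import "fmt"

	// packBools packs up to 8 booleans into a single byte, high bit first.
	func packBools(bools []bool) byte {
		var b byte
		for _, v := range bools {
			b <<= 1
			if v {
				b |= 1
			}
		}
		// Pad the remaining low bits with zeros, as the encoder's flush does.
		b <<= uint(8 - len(bools))
		return b
	}

	func main() {
		fmt.Printf("%08b\n", packBools([]bool{true, false, true})) // 10100000
	}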
@@ -93,6 +91,13 @@ func (e *boolEncoder) Bytes() ([]byte, error) {
 	return append(b[:i], e.bytes...), nil
 }
 
+// BoolDecoder decodes a series of bools from an in-memory buffer.
+type BoolDecoder interface {
+	Next() bool
+	Read() bool
+	Error() error
+}
+
 type boolDecoder struct {
 	b []byte
 	i int
@@ -100,6 +105,7 @@ type boolDecoder struct {
 	err error
 }
 
+// NewBoolDecoder returns a new instance of BoolDecoder.
 func NewBoolDecoder(b []byte) BoolDecoder {
 	// First byte stores the encoding type, only have 1 bit-packet format
 	// currently ignore for now.
@@ -109,7 +115,7 @@ func NewBoolDecoder(b []byte) BoolDecoder {
 }
 
 func (e *boolDecoder) Next() bool {
-	e.i += 1
+	e.i++
 	return e.i < e.n
 }
 
34 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine/tsm1/bool_test.go generated vendored
@@ -1,7 +1,9 @@
 package tsm1_test
 
 import (
+	"reflect"
 	"testing"
+	"testing/quick"
 
 	"github.com/influxdb/influxdb/tsdb/engine/tsm1"
 )
@@ -71,3 +73,35 @@ func Test_BoolEncoder_Multi_Compressed(t *testing.T) {
 		t.Fatalf("unexpected next value: got true, exp false")
 	}
 }
+
+func Test_BoolEncoder_Quick(t *testing.T) {
+	if err := quick.Check(func(values []bool) bool {
+		// Write values to encoder.
+		enc := tsm1.NewBoolEncoder()
+		for _, v := range values {
+			enc.Write(v)
+		}
+
+		// Retrieve compressed bytes.
+		buf, err := enc.Bytes()
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		// Read values out of decoder.
+		got := make([]bool, 0, len(values))
+		dec := tsm1.NewBoolDecoder(buf)
+		for dec.Next() {
+			got = append(got, dec.Read())
+		}
+
+		// Verify that input and output values match.
+		if !reflect.DeepEqual(values, got) {
+			t.Fatalf("mismatch:\n\nexp=%+v\n\ngot=%+v\n\n", values, got)
+		}
+
+		return true
+	}, nil); err != nil {
+		t.Fatal(err)
+	}
+}
@@ -20,6 +20,8 @@ type combinedEngineCursor struct {
 	ascending bool
 }
 
+// NewCombinedEngineCursor returns a Cursor that joins wc and ec.
+// Values from wc take precedence over ec when identical timestamps are returned.
 func NewCombinedEngineCursor(wc, ec tsdb.Cursor, ascending bool) tsdb.Cursor {
 	return &combinedEngineCursor{
 		walCursor: wc,
@@ -105,6 +107,7 @@ type multiFieldCursor struct {
 	valueBuffer []interface{}
 }
 
+// NewMultiFieldCursor returns an instance of Cursor that joins the results of cursors.
 func NewMultiFieldCursor(fields []string, cursors []tsdb.Cursor, ascending bool) tsdb.Cursor {
 	return &multiFieldCursor{
 		fields: fields,
204 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine/tsm1/cursor_test.go generated vendored Normal file
@@ -0,0 +1,204 @@
+package tsm1_test
+
+import (
+	"math/rand"
+	"reflect"
+	"sort"
+	"testing"
+	"testing/quick"
+	"time"
+
+	"github.com/influxdb/influxdb/tsdb"
+	"github.com/influxdb/influxdb/tsdb/engine/tsm1"
+)
+
+func TestCombinedEngineCursor_Quick(t *testing.T) {
+	const tmin = 0
+	quick.Check(func(wc, ec *Cursor, ascending bool, seek int64) bool {
+		c := tsm1.NewCombinedEngineCursor(wc, ec, ascending)
+
+		// Read from cursor.
+		got := make([]int64, 0)
+		for k, _ := c.SeekTo(seek); k != tsdb.EOF; k, _ = c.Next() {
+			got = append(got, k)
+		}
+
+		// Merge cursors items.
+		merged := MergeCursorItems(wc.items, ec.items)
+		if !ascending {
+			sort.Sort(sort.Reverse(CursorItems(merged)))
+		}
+
+		// Filter out items outside of seek range.
+		exp := make([]int64, 0)
+		for _, item := range merged {
+			if (ascending && item.Key < seek) || (!ascending && item.Key > seek) {
+				continue
+			}
+			exp = append(exp, item.Key)
+		}
+
+		if !reflect.DeepEqual(got, exp) {
+			t.Fatalf("mismatch: seek=%v, ascending=%v\n\ngot=%#v\n\nexp=%#v\n\n", seek, ascending, got, exp)
+		}
+
+		return true
+	}, &quick.Config{Values: func(values []reflect.Value, rand *rand.Rand) {
+		ascending := rand.Intn(1) == 1
+		values[0] = reflect.ValueOf(GenerateCursor(tmin, 10, ascending, rand))
+		values[1] = reflect.ValueOf(GenerateCursor(tmin, 10, ascending, rand))
+		values[2] = reflect.ValueOf(ascending)
+		values[3] = reflect.ValueOf(rand.Int63n(100))
+	}})
+}
+
+// Cursor represents a simple test cursor that implements tsdb.Cursor.
+type Cursor struct {
+	i         int
+	items     []CursorItem
+	ascending bool
+}
+
+// NewCursor returns a new instance of Cursor.
+func NewCursor(items []CursorItem, ascending bool) *Cursor {
+	c := &Cursor{
+		items:     items,
+		ascending: ascending,
+	}
+
+	// Set initial position depending on cursor direction.
+	if ascending {
+		c.i = -1
+	} else {
+		c.i = len(c.items)
+	}
+
+	return c
+}
+
+// CursorItem represents an item in a test cursor.
+type CursorItem struct {
+	Key   int64
+	Value interface{}
+}
+
+// SeekTo moves the cursor to the first key greater than or equal to seek.
+func (c *Cursor) SeekTo(seek int64) (key int64, value interface{}) {
+	if c.ascending {
+		for i, item := range c.items {
+			if item.Key >= seek {
+				c.i = i
+				return item.Key, item.Value
+			}
+		}
+	} else {
+		for i := len(c.items) - 1; i >= 0; i-- {
+			if item := c.items[i]; item.Key <= seek {
+				c.i = i
+				return item.Key, item.Value
+			}
+		}
+	}
+	c.i = len(c.items)
+	return tsdb.EOF, nil
+}
+
+// Next returns the next key/value from the cursor.
+func (c *Cursor) Next() (key int64, value interface{}) {
+	if c.ascending {
+		c.i++
+		if c.i >= len(c.items) {
+			return tsdb.EOF, nil
+		}
+	} else if !c.ascending {
+		c.i--
+		if c.i < 0 {
+			return tsdb.EOF, nil
+		}
+	}
+	return c.items[c.i].Key, c.items[c.i].Value
+}
+
+// Ascending returns true if the cursor moves in ascending order.
+func (c *Cursor) Ascending() bool { return c.ascending }
+
+// CursorItems represents a list of CursorItem objects.
+type CursorItems []CursorItem
+
+func (a CursorItems) Len() int           { return len(a) }
+func (a CursorItems) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a CursorItems) Less(i, j int) bool { return a[i].Key < a[j].Key }
+
+// Keys returns a list of keys.
+func (a CursorItems) Keys() []int64 {
+	keys := make([]int64, len(a))
+	for i := range a {
+		keys[i] = a[i].Key
+	}
+	return keys
+}
+
+// GenerateCursor generates a cursor with a random data.
+func GenerateCursor(tmin, step int64, ascending bool, rand *rand.Rand) *Cursor {
+	key := tmin + rand.Int63n(10)
+	items := make([]CursorItem, 0)
+	for i, n := 0, rand.Intn(100); i < n; i++ {
+		items = append(items, CursorItem{
+			Key:   key,
+			Value: int64(0),
+		})
+		key += rand.Int63n(10)
+	}
+	return NewCursor(items, ascending)
+}
+
+// MergeCursorItems merges items in a & b together.
+// If two items share a timestamp then a takes precendence.
+func MergeCursorItems(a, b []CursorItem) []CursorItem {
+	items := make([]CursorItem, 0)
+	var ai, bi int
+	for {
+		if ai < len(a) && bi < len(b) {
+			if ak, bk := a[ai].Key, b[bi].Key; ak == bk {
+				items = append(items, a[ai])
+				ai++
+				bi++
+			} else if ak < bk {
+				items = append(items, a[ai])
+				ai++
+			} else {
+				items = append(items, b[bi])
+				bi++
+			}
+		} else if ai < len(a) {
+			items = append(items, a[ai])
+			ai++
+		} else if bi < len(b) {
+			items = append(items, b[bi])
+			bi++
+		} else {
+			break
+		}
+	}
+	return items
+}
+
+// ReadAllCursor slurps all values from a cursor.
+func ReadAllCursor(c tsdb.Cursor) tsm1.Values {
+	var values tsm1.Values
+	for k, v := c.Next(); k != tsdb.EOF; k, v = c.Next() {
+		values = append(values, tsm1.NewValue(time.Unix(0, k).UTC(), v))
+	}
+	return values
+}
+
+// DedupeValues returns a list of values with duplicate times removed.
+func DedupeValues(a tsm1.Values) tsm1.Values {
+	other := make(tsm1.Values, 0, len(a))
+	m := map[int64]struct{}{}
+
+	for i := len(a) - 1; i >= 0; i-- {
+		value := a[i]
+		if _, ok := m[value.UnixNano()]; ok {
+			continue
+		}
+
+		other = append(other, value)
+		m[value.UnixNano()] = struct{}{}
+	}
+
+	return other
+}
62 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine/tsm1/encoding.go generated vendored
@@ -61,45 +61,33 @@ func (e *EmptyValue) Size() int { return 0 }
 // makes the code cleaner.
 type Values []Value
 
-func (v Values) MinTime() int64 {
-	return v[0].Time().UnixNano()
+func (a Values) MinTime() int64 {
+	return a[0].Time().UnixNano()
 }
 
-func (v Values) MaxTime() int64 {
-	return v[len(v)-1].Time().UnixNano()
+func (a Values) MaxTime() int64 {
+	return a[len(a)-1].Time().UnixNano()
 }
 
-func (v Values) Encode(buf []byte) ([]byte, error) {
-	switch v[0].(type) {
+// Encode converts the values to a byte slice. If there are no values,
+// this function panics.
+func (a Values) Encode(buf []byte) ([]byte, error) {
+	if len(a) == 0 {
+		panic("unable to encode block type")
+	}
+
+	switch a[0].(type) {
 	case *FloatValue:
-		return encodeFloatBlock(buf, v)
+		return encodeFloatBlock(buf, a)
 	case *Int64Value:
-		return encodeInt64Block(buf, v)
+		return encodeInt64Block(buf, a)
 	case *BoolValue:
-		return encodeBoolBlock(buf, v)
+		return encodeBoolBlock(buf, a)
 	case *StringValue:
-		return encodeStringBlock(buf, v)
+		return encodeStringBlock(buf, a)
 	}
 
-	return nil, fmt.Errorf("unsupported value type %T", v[0])
-}
-
-func (v Values) DecodeSameTypeBlock(block []byte) Values {
-	switch v[0].(type) {
-	case *FloatValue:
-		a, _ := decodeFloatBlock(block)
-		return a
-	case *Int64Value:
-		a, _ := decodeInt64Block(block)
-		return a
-	case *BoolValue:
-		a, _ := decodeBoolBlock(block)
-		return a
-	case *StringValue:
-		a, _ := decodeStringBlock(block)
-		return a
-	}
-	return nil
+	return nil, fmt.Errorf("unsupported value type %T", a[0])
 }
 
 // DecodeBlock takes a byte array and will decode into values of the appropriate type
@@ -127,19 +115,19 @@ func DecodeBlock(block []byte) (Values, error) {
 // Deduplicate returns a new Values slice with any values
 // that have the same timestamp removed. The Value that appears
 // last in the slice is the one that is kept. The returned slice is in ascending order
-func (v Values) Deduplicate() Values {
+func (a Values) Deduplicate() Values {
 	m := make(map[int64]Value)
-	for _, val := range v {
+	for _, val := range a {
 		m[val.UnixNano()] = val
 	}
 
-	a := make([]Value, 0, len(m))
+	other := make([]Value, 0, len(m))
 	for _, val := range m {
-		a = append(a, val)
+		other = append(other, val)
 	}
-	sort.Sort(Values(a))
+	sort.Sort(Values(other))
 
-	return a
+	return other
 }
 
 // Sort methods
@@ -352,8 +340,8 @@ func (v *Int64Value) Value() interface{} {
 	return v.value
 }
 
-func (f *Int64Value) UnixNano() int64 {
-	return f.time.UnixNano()
+func (v *Int64Value) UnixNano() int64 {
+	return v.time.UnixNano()
 }
 
 func (v *Int64Value) Size() int {
35 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine/tsm1/encoding_test.go generated vendored
@@ -24,7 +24,10 @@ func TestEncoding_FloatBlock(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}
 
-	decodedValues := values.DecodeSameTypeBlock(b)
+	decodedValues, err := tsm1.DecodeBlock(b)
+	if err != nil {
+		t.Fatalf("unexpected error decoding block: %v", err)
+	}
 
 	if !reflect.DeepEqual(decodedValues, values) {
 		t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues, values)
@@ -42,7 +45,10 @@ func TestEncoding_FloatBlock_ZeroTime(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}
 
-	decodedValues := values.DecodeSameTypeBlock(b)
+	decodedValues, err := tsm1.DecodeBlock(b)
+	if err != nil {
+		t.Fatalf("unexpected error decoding block: %v", err)
+	}
 
 	if !reflect.DeepEqual(decodedValues, values) {
 		t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues, values)
@@ -62,7 +68,10 @@ func TestEncoding_FloatBlock_SimilarFloats(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}
 
-	decodedValues := values.DecodeSameTypeBlock(b)
+	decodedValues, err := tsm1.DecodeBlock(b)
+	if err != nil {
+		t.Fatalf("unexpected error decoding block: %v", err)
+	}
 
 	if !reflect.DeepEqual(decodedValues, values) {
 		t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues, values)
@@ -82,7 +91,10 @@ func TestEncoding_IntBlock_Basic(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}
 
-	decodedValues := values.DecodeSameTypeBlock(b)
+	decodedValues, err := tsm1.DecodeBlock(b)
+	if err != nil {
+		t.Fatalf("unexpected error decoding block: %v", err)
+	}
 
 	if len(decodedValues) != len(values) {
 		t.Fatalf("unexpected results length:\n\tgot: %v\n\texp: %v\n", len(decodedValues), len(values))
@@ -117,7 +129,10 @@ func TestEncoding_IntBlock_Negatives(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}
 
-	decodedValues := values.DecodeSameTypeBlock(b)
+	decodedValues, err := tsm1.DecodeBlock(b)
+	if err != nil {
+		t.Fatalf("unexpected error decoding block: %v", err)
+	}
 
 	if !reflect.DeepEqual(decodedValues, values) {
 		t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues, values)
@@ -141,7 +156,10 @@ func TestEncoding_BoolBlock_Basic(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}
 
-	decodedValues := values.DecodeSameTypeBlock(b)
+	decodedValues, err := tsm1.DecodeBlock(b)
+	if err != nil {
+		t.Fatalf("unexpected error decoding block: %v", err)
+	}
 
 	if !reflect.DeepEqual(decodedValues, values) {
 		t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues, values)
@@ -161,7 +179,10 @@ func TestEncoding_StringBlock_Basic(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}
 
-	decodedValues := values.DecodeSameTypeBlock(b)
+	decodedValues, err := tsm1.DecodeBlock(b)
+	if err != nil {
+		t.Fatalf("unexpected error decoding block: %v", err)
+	}
 
 	if !reflect.DeepEqual(decodedValues, values) {
 		t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues, values)
@@ -18,8 +18,10 @@ import (
 )
 
 const (
-	// floatUncompressed is an uncompressed format using 8 bytes per value
+	// floatUncompressed is an uncompressed format using 8 bytes per value.
+	// Not yet implemented.
 	floatUncompressed = 0
 
 	// floatCompressedGorilla is a compressed format using the gorilla paper encoding
 	floatCompressedGorilla = 1
 )
@@ -154,6 +156,13 @@ func (it *FloatDecoder) Next() bool {
 
 	if it.first {
 		it.first = false
+
+		// mark as finished if there were no values.
+		if math.IsNaN(it.val) {
+			it.finished = true
+			return false
+		}
+
 		return true
 	}
 
30 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine/tsm1/float_test.go generated vendored
@@ -1,7 +1,9 @@
 package tsm1_test
 
 import (
+	"reflect"
 	"testing"
+	"testing/quick"
 
 	"github.com/influxdb/influxdb/tsdb/engine/tsm1"
 )
@@ -174,6 +176,34 @@ func TestFloatEncoder_Roundtrip(t *testing.T) {
 	}
 }
 
+func Test_FloatEncoder_Quick(t *testing.T) {
+	quick.Check(func(values []float64) bool {
+		// Write values to encoder.
+		enc := tsm1.NewFloatEncoder()
+		for _, v := range values {
+			enc.Push(v)
+		}
+		enc.Finish()
+
+		// Read values out of decoder.
+		got := make([]float64, 0, len(values))
+		dec, err := tsm1.NewFloatDecoder(enc.Bytes())
+		if err != nil {
+			t.Fatal(err)
+		}
+		for dec.Next() {
+			got = append(got, dec.Values())
+		}
+
+		// Verify that input and output values match.
+		if !reflect.DeepEqual(values, got) {
+			t.Fatalf("mismatch:\n\nexp=%+v\n\ngot=%+v\n\n", values, got)
+		}
+
+		return true
+	}, nil)
+}
+
 func BenchmarkFloatEncoder(b *testing.B) {
 	for i := 0; i < b.N; i++ {
 		s := tsm1.NewFloatEncoder()
@@ -2,7 +2,7 @@ package tsm1
 
 // Int64 encoding uses two different strategies depending on the range of values in
 // the uncompressed data. Encoded values are first encoding used zig zag encoding.
-// This interleaves postiive and negative integers across a range of positive integers.
+// This interleaves positive and negative integers across a range of positive integers.
 //
 // For example, [-2,-1,0,1] becomes [3,1,0,2]. See
 // https://developers.google.com/protocol-buffers/docs/encoding?hl=en#signed-integers
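A self-contained sketch of the zig-zag transform described in that comment; the bodies mirror the protocol-buffers definition linked above, and the lowercase names are mine rather than the package's exported ZigZagEncode/ZigZagDecode:

	package main

	import "fmt"

	// zigZagEncode maps signed integers onto unsigned ones so that values
	// close to zero stay small: ..., -2 -> 3, -1 -> 1, 0 -> 0, 1 -> 2, ...
	func zigZagEncode(x int64) uint64 {
		return uint64(x<<1) ^ uint64(x>>63)
	}

	// zigZagDecode reverses zigZagEncode.
	func zigZagDecode(v uint64) int64 {
		return int64(v>>1) ^ -int64(v&1)
	}

	func main() {
		for _, x := range []int64{-2, -1, 0, 1} {
			fmt.Println(x, "->", zigZagEncode(x)) // prints 3, 1, 0, 2
		}
	}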
@@ -32,6 +32,8 @@ const (
 	intUncompressed = 0
 	// intCompressedSimple is a bit-packed format using simple8b encoding
 	intCompressedSimple = 1
+	// intCompressedRLE is a run-length encoding format
+	intCompressedRLE = 2
 )
 
 // Int64Encoder encoders int64 into byte slices
@@ -48,18 +50,34 @@ type Int64Decoder interface {
 }
 
 type int64Encoder struct {
+	prev   int64
+	rle    bool
 	values []uint64
 }
 
 func NewInt64Encoder() Int64Encoder {
-	return &int64Encoder{}
+	return &int64Encoder{rle: true}
 }
 
 func (e *int64Encoder) Write(v int64) {
-	e.values = append(e.values, ZigZagEncode(v))
+	// Delta-encode each value as it's written. This happens before
+	// ZigZagEncoding because the deltas could be negative.
+	delta := v - e.prev
+	e.prev = v
+	enc := ZigZagEncode(delta)
+	if len(e.values) > 1 {
+		e.rle = e.rle && e.values[len(e.values)-1] == enc
+	}
+
+	e.values = append(e.values, enc)
 }
 
 func (e *int64Encoder) Bytes() ([]byte, error) {
+	// Only run-length encode if it could be reduce storage size
+	if e.rle && len(e.values) > 2 {
+		return e.encodeRLE()
+	}
+
 	for _, v := range e.values {
 		// Value is too large to encode using packed format
 		if v > simple8b.MaxValue {
@@ -70,23 +88,56 @@ func (e *int64Encoder) Bytes() ([]byte, error) {
 	return e.encodePacked()
 }
 
+func (e *int64Encoder) encodeRLE() ([]byte, error) {
+	// Large varints can take up to 10 bytes
+	b := make([]byte, 1+10*3)
+
+	// 4 high bits used for the encoding type
+	b[0] = byte(intCompressedRLE) << 4
+
+	i := 1
+	// The first value
+	binary.BigEndian.PutUint64(b[i:], e.values[0])
+	i += 8
+	// The first delta
+	i += binary.PutUvarint(b[i:], e.values[1])
+	// The number of times the delta is repeated
+	i += binary.PutUvarint(b[i:], uint64(len(e.values)-1))
+
+	return b[:i], nil
+}
+
 func (e *int64Encoder) encodePacked() ([]byte, error) {
-	encoded, err := simple8b.EncodeAll(e.values)
+	if len(e.values) == 0 {
+		return nil, nil
+	}
+
+	// Encode all but the first value. Fist value is written unencoded
+	// using 8 bytes.
+	encoded, err := simple8b.EncodeAll(e.values[1:])
 	if err != nil {
 		return nil, err
 	}
 
-	b := make([]byte, 1+len(encoded)*8)
+	b := make([]byte, 1+(len(encoded)+1)*8)
 	// 4 high bits of first byte store the encoding type for the block
 	b[0] = byte(intCompressedSimple) << 4
 
+	// Write the first value since it's not part of the encoded values
+	binary.BigEndian.PutUint64(b[1:9], e.values[0])
+
+	// Write the encoded values
 	for i, v := range encoded {
-		binary.BigEndian.PutUint64(b[1+i*8:1+i*8+8], v)
+		binary.BigEndian.PutUint64(b[9+i*8:9+i*8+8], v)
 	}
 	return b, nil
 }
 
 func (e *int64Encoder) encodeUncompressed() ([]byte, error) {
+	if len(e.values) == 0 {
+		return nil, nil
+	}
+
 	b := make([]byte, 1+len(e.values)*8)
 	// 4 high bits of first byte store the encoding type for the block
 	b[0] = byte(intUncompressed) << 4
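As a rough sanity check of the layout encodeRLE writes above (one header byte, an 8-byte first value, a varint delta, and a varint repeat count), a tiny standalone program; the concrete numbers assume six counter values with a constant delta of 1, as in the CounterRLE test further down:

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	func main() {
		buf := make([]byte, binary.MaxVarintLen64)

		// The repeated delta of 1 zig-zag encodes to 2, and it repeats 5
		// times; both values fit in a single varint byte.
		fmt.Println(binary.PutUvarint(buf, 2)) // 1
		fmt.Println(binary.PutUvarint(buf, 5)) // 1

		// Total: 1 header + 8 first value + 1 delta + 1 count = 11 bytes,
		// matching the length the test asserts.
	}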
@@ -102,7 +153,14 @@ type int64Decoder struct {
 	bytes []byte
 	i     int
 	n     int
+	prev  int64
+	first bool
+
+	// The first value for a run-length encoded byte slice
+	rleFirst uint64
+	// The delta value for a run-length encoded byte slice
+	rleDelta uint64
 	encoding byte
 	err      error
 }
@@ -122,6 +180,7 @@ func (d *int64Decoder) SetBytes(b []byte) {
 		d.encoding = b[0] >> 4
 		d.bytes = b[1:]
 	}
+	d.first = true
 	d.i = 0
 	d.n = 0
 }
@@ -131,7 +190,7 @@ func (d *int64Decoder) Next() bool {
 		return false
 	}
 
-	d.i += 1
+	d.i++
 
 	if d.i >= d.n {
 		switch d.encoding {
@@ -139,6 +198,8 @@ func (d *int64Decoder) Next() bool {
 			d.decodeUncompressed()
 		case intCompressedSimple:
 			d.decodePacked()
+		case intCompressedRLE:
+			d.decodeRLE()
 		default:
 			d.err = fmt.Errorf("unknown encoding %v", d.encoding)
 		}
@@ -151,7 +212,48 @@ func (d *int64Decoder) Error() error {
 }
 
 func (d *int64Decoder) Read() int64 {
-	return ZigZagDecode(d.values[d.i])
+	switch d.encoding {
+	case intCompressedRLE:
+		return ZigZagDecode(d.rleFirst + uint64(d.i)*d.rleDelta)
+	default:
+		v := ZigZagDecode(d.values[d.i])
+		// v is the delta encoded value, we need to add the prior value to get the original
+		v = v + d.prev
+		d.prev = v
+		return v
+	}
+}
+
+func (d *int64Decoder) decodeRLE() {
+	if len(d.bytes) == 0 {
+		return
+	}
+
+	var i, n int
+
+	// Next 8 bytes is the starting value
+	first := binary.BigEndian.Uint64(d.bytes[i : i+8])
+	i += 8
+
+	// Next 1-10 bytes is the delta value
+	value, n := binary.Uvarint(d.bytes[i:])
+
+	i += n
+
+	// Last 1-10 bytes is how many times the value repeats
+	count, n := binary.Uvarint(d.bytes[i:])
+
+	// Store the first value and delta value so we do not need to allocate
+	// a large values slice. We can compute the value at position d.i on
+	// demand.
+	d.rleFirst = first
+	d.rleDelta = value
+	d.n = int(count) + 1
+	d.i = 0
+
+	// We've process all the bytes
+	d.bytes = nil
 }
 
 func (d *int64Decoder) decodePacked() {
@@ -160,6 +262,12 @@ func (d *int64Decoder) decodePacked() {
 	}
 
 	v := binary.BigEndian.Uint64(d.bytes[0:8])
+	// The first value is always unencoded
+	if d.first {
+		d.first = false
+		d.n = 1
+		d.values[0] = v
+	} else {
 	n, err := simple8b.Decode(d.values, v)
 	if err != nil {
 		// Should never happen, only error that could be returned is if the the value to be decoded was not
@@ -168,11 +276,16 @@ func (d *int64Decoder) decodePacked() {
 	}
 
 	d.n = n
+	}
 	d.i = 0
 	d.bytes = d.bytes[8:]
 }
 
 func (d *int64Decoder) decodeUncompressed() {
+	if len(d.bytes) == 0 {
+		return
+	}
+
 	d.values[0] = binary.BigEndian.Uint64(d.bytes[0:8])
 	d.i = 0
 	d.n = 1
317 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine/tsm1/int_test.go generated vendored
@@ -1,27 +1,32 @@
-package tsm1_test
+package tsm1
 
 import (
 	"math"
+	"math/rand"
+	"reflect"
 	"testing"
-
-	"github.com/influxdb/influxdb/tsdb/engine/tsm1"
+	"testing/quick"
 )
 
 func Test_Int64Encoder_NoValues(t *testing.T) {
-	enc := tsm1.NewInt64Encoder()
+	enc := NewInt64Encoder()
 	b, err := enc.Bytes()
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
 
-	dec := tsm1.NewInt64Decoder(b)
+	if len(b) > 0 {
+		t.Fatalf("unexpected lenght: exp 0, got %v", len(b))
+	}
+
+	dec := NewInt64Decoder(b)
 	if dec.Next() {
 		t.Fatalf("unexpected next value: got true, exp false")
 	}
 }
 
 func Test_Int64Encoder_One(t *testing.T) {
-	enc := tsm1.NewInt64Encoder()
+	enc := NewInt64Encoder()
 	v1 := int64(1)
 
 	enc.Write(1)
@@ -30,7 +35,11 @@ func Test_Int64Encoder_One(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}
 
-	dec := tsm1.NewInt64Decoder(b)
+	if got := b[0] >> 4; intCompressedSimple != got {
+		t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got)
+	}
+
+	dec := NewInt64Decoder(b)
 	if !dec.Next() {
 		t.Fatalf("unexpected next value: got true, exp false")
 	}
@@ -41,7 +50,7 @@ func Test_Int64Encoder_One(t *testing.T) {
 }
 
 func Test_Int64Encoder_Two(t *testing.T) {
-	enc := tsm1.NewInt64Encoder()
+	enc := NewInt64Encoder()
 	var v1, v2 int64 = 1, 2
 
 	enc.Write(v1)
@@ -52,7 +61,11 @@ func Test_Int64Encoder_Two(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}
 
-	dec := tsm1.NewInt64Decoder(b)
+	if got := b[0] >> 4; intCompressedSimple != got {
+		t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got)
+	}
+
+	dec := NewInt64Decoder(b)
 	if !dec.Next() {
 		t.Fatalf("unexpected next value: got true, exp false")
 	}
@@ -71,7 +84,7 @@ func Test_Int64Encoder_Two(t *testing.T) {
 }
 
 func Test_Int64Encoder_Negative(t *testing.T) {
-	enc := tsm1.NewInt64Encoder()
+	enc := NewInt64Encoder()
 	var v1, v2, v3 int64 = -2, 0, 1
 
 	enc.Write(v1)
@@ -83,7 +96,11 @@ func Test_Int64Encoder_Negative(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}
 
-	dec := tsm1.NewInt64Decoder(b)
+	if got := b[0] >> 4; intCompressedSimple != got {
+		t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got)
+	}
+
+	dec := NewInt64Decoder(b)
 	if !dec.Next() {
 		t.Fatalf("unexpected next value: got true, exp false")
 	}
@@ -110,7 +127,7 @@ func Test_Int64Encoder_Negative(t *testing.T) {
 }
 
 func Test_Int64Encoder_Large_Range(t *testing.T) {
-	enc := tsm1.NewInt64Encoder()
+	enc := NewInt64Encoder()
 	var v1, v2 int64 = math.MinInt64, math.MaxInt64
 	enc.Write(v1)
 	enc.Write(v2)
@@ -119,7 +136,11 @@ func Test_Int64Encoder_Large_Range(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}
 
-	dec := tsm1.NewInt64Decoder(b)
+	if got := b[0] >> 4; intUncompressed != got {
+		t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got)
+	}
+
+	dec := NewInt64Decoder(b)
 	if !dec.Next() {
 		t.Fatalf("unexpected next value: got true, exp false")
 	}
@@ -138,7 +159,7 @@ func Test_Int64Encoder_Large_Range(t *testing.T) {
 }
 
 func Test_Int64Encoder_Uncompressed(t *testing.T) {
-	enc := tsm1.NewInt64Encoder()
+	enc := NewInt64Encoder()
 	var v1, v2, v3 int64 = 0, 1, 1 << 60
 
 	enc.Write(v1)
@@ -155,7 +176,11 @@ func Test_Int64Encoder_Uncompressed(t *testing.T) {
 		t.Fatalf("length mismatch: got %v, exp %v", len(b), exp)
 	}
 
-	dec := tsm1.NewInt64Decoder(b)
+	if got := b[0] >> 4; intUncompressed != got {
+		t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got)
+	}
+
+	dec := NewInt64Decoder(b)
 	if !dec.Next() {
 		t.Fatalf("unexpected next value: got true, exp false")
 	}
@@ -181,8 +206,52 @@ func Test_Int64Encoder_Uncompressed(t *testing.T) {
 	}
 }
 
+func Test_Int64Encoder_NegativeUncompressed(t *testing.T) {
+	values := []int64{
+		-2352281900722994752, 1438442655375607923, -4110452567888190110,
+		-1221292455668011702, -1941700286034261841, -2836753127140407751,
+		1432686216250034552, 3663244026151507025, -3068113732684750258,
+		-1949953187327444488, 3713374280993588804, 3226153669854871355,
+		-2093273755080502606, 1006087192578600616, -2272122301622271655,
+		2533238229511593671, -4450454445568858273, 2647789901083530435,
+		2761419461769776844, -1324397441074946198, -680758138988210958,
+		94468846694902125, -2394093124890745254, -2682139311758778198,
+	}
+	enc := NewInt64Encoder()
+	for _, v := range values {
+		enc.Write(v)
+	}
+
+	b, err := enc.Bytes()
+	if err != nil {
+		t.Fatalf("expected error: %v", err)
+	}
+
+	if got := b[0] >> 4; intUncompressed != got {
+		t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got)
+	}
+
+	dec := NewInt64Decoder(b)
+
+	i := 0
+	for dec.Next() {
+		if i > len(values) {
+			t.Fatalf("read too many values: got %v, exp %v", i, len(values))
+		}
+
+		if values[i] != dec.Read() {
+			t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), values[i])
+		}
+		i += 1
+	}
+
+	if i != len(values) {
+		t.Fatalf("failed to read enough values: got %v, exp %v", i, len(values))
+	}
+}
+
 func Test_Int64Encoder_AllNegative(t *testing.T) {
-	enc := tsm1.NewInt64Encoder()
+	enc := NewInt64Encoder()
 	values := []int64{
 		-10, -5, -1,
 	}
@@ -196,7 +265,11 @@ func Test_Int64Encoder_AllNegative(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}
 
-	dec := tsm1.NewInt64Decoder(b)
+	if got := b[0] >> 4; intCompressedSimple != got {
+		t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got)
+	}
+
+	dec := NewInt64Decoder(b)
 	i := 0
 	for dec.Next() {
 		if i > len(values) {
@@ -208,10 +281,174 @@ func Test_Int64Encoder_AllNegative(t *testing.T) {
 		}
 		i += 1
 	}
+
+	if i != len(values) {
+		t.Fatalf("failed to read enough values: got %v, exp %v", i, len(values))
+	}
 }
 
-func BenchmarkInt64Encoder(b *testing.B) {
-	enc := tsm1.NewInt64Encoder()
+func Test_Int64Encoder_CounterPacked(t *testing.T) {
+	enc := NewInt64Encoder()
+	values := []int64{
+		1e15, 1e15 + 1, 1e15 + 2, 1e15 + 3, 1e15 + 4, 1e15 + 6,
+	}
+
+	for _, v := range values {
+		enc.Write(v)
+	}
+
+	b, err := enc.Bytes()
+	if err != nil {
+		t.Fatalf("unexpected error: %v", err)
+	}
+
+	if b[0]>>4 != intCompressedSimple {
+		t.Fatalf("unexpected encoding format: expected simple, got %v", b[0]>>4)
+	}
+
+	// Should use 1 header byte + 2, 8 byte words if delta-encoding is used based on
+	// values sizes. Without delta-encoding, we'd get 49 bytes.
+	if exp := 17; len(b) != exp {
+		t.Fatalf("encoded length mismatch: got %v, exp %v", len(b), exp)
+	}
+
+	dec := NewInt64Decoder(b)
+	i := 0
+	for dec.Next() {
+		if i > len(values) {
+			t.Fatalf("read too many values: got %v, exp %v", i, len(values))
+		}
+
+		if values[i] != dec.Read() {
+			t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), values[i])
+		}
+		i += 1
+	}
+
+	if i != len(values) {
+		t.Fatalf("failed to read enough values: got %v, exp %v", i, len(values))
+	}
+}
+
+func Test_Int64Encoder_CounterRLE(t *testing.T) {
+	enc := NewInt64Encoder()
+	values := []int64{
+		1e15, 1e15 + 1, 1e15 + 2, 1e15 + 3, 1e15 + 4, 1e15 + 5,
+	}
+
+	for _, v := range values {
+		enc.Write(v)
+	}
+
+	b, err := enc.Bytes()
+	if err != nil {
+		t.Fatalf("unexpected error: %v", err)
+	}
+
+	if b[0]>>4 != intCompressedRLE {
+		t.Fatalf("unexpected encoding format: expected simple, got %v", b[0]>>4)
+	}
+
+	// Should use 1 header byte, 8 byte first value, 1 var-byte for delta and 1 var-byte for
+	// count of deltas in this particular RLE.
+	if exp := 11; len(b) != exp {
+		t.Fatalf("encoded length mismatch: got %v, exp %v", len(b), exp)
+	}
+
+	dec := NewInt64Decoder(b)
+	i := 0
+	for dec.Next() {
+		if i > len(values) {
+			t.Fatalf("read too many values: got %v, exp %v", i, len(values))
+		}
+
+		if values[i] != dec.Read() {
+			t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), values[i])
+		}
+		i += 1
+	}
+
+	if i != len(values) {
+		t.Fatalf("failed to read enough values: got %v, exp %v", i, len(values))
+	}
+}
+
+func Test_Int64Encoder_MinMax(t *testing.T) {
+	enc := NewInt64Encoder()
+	values := []int64{
+		math.MinInt64, math.MaxInt64,
+	}
+
+	for _, v := range values {
+		enc.Write(v)
+	}
+
+	b, err := enc.Bytes()
+	if err != nil {
+		t.Fatalf("unexpected error: %v", err)
+	}
+
+	if b[0]>>4 != intUncompressed {
+		t.Fatalf("unexpected encoding format: expected simple, got %v", b[0]>>4)
+	}
+
+	if exp := 17; len(b) != exp {
+		t.Fatalf("encoded length mismatch: got %v, exp %v", len(b), exp)
+	}
+
+	dec := NewInt64Decoder(b)
+	i := 0
+	for dec.Next() {
+		if i > len(values) {
+			t.Fatalf("read too many values: got %v, exp %v", i, len(values))
+		}
+
+		if values[i] != dec.Read() {
+			t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), values[i])
+		}
+		i += 1
+	}
+
+	if i != len(values) {
+		t.Fatalf("failed to read enough values: got %v, exp %v", i, len(values))
+	}
+}
+
+func Test_Int64Encoder_Quick(t *testing.T) {
+	quick.Check(func(values []int64) bool {
+		// Write values to encoder.
+		enc := NewInt64Encoder()
+		for _, v := range values {
+			enc.Write(v)
+		}
+
+		// Retrieve encoded bytes from encoder.
+		buf, err := enc.Bytes()
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		// Read values out of decoder.
+		got := make([]int64, 0, len(values))
+		dec := NewInt64Decoder(buf)
+		for dec.Next() {
+			if err := dec.Error(); err != nil {
+				t.Fatal(err)
+			}
+			got = append(got, dec.Read())
+		}
+
+		// Verify that input and output values match.
+		if !reflect.DeepEqual(values, got) {
+			t.Fatalf("mismatch:\n\nexp=%+v\n\ngot=%+v\n\n", values, got)
+		}
+
+		return true
+	}, nil)
+}
+
+func BenchmarkInt64EncoderRLE(b *testing.B) {
+	enc := NewInt64Encoder()
 	x := make([]int64, 1024)
 	for i := 0; i < len(x); i++ {
 		x[i] = int64(i)
@@ -224,13 +461,49 @@ func BenchmarkInt64Encoder(b *testing.B) {
 	}
 }
 
+func BenchmarkInt64EncoderPackedSimple(b *testing.B) {
+	enc := NewInt64Encoder()
+	x := make([]int64, 1024)
+	for i := 0; i < len(x); i++ {
+		// Small amount of randomness prevents RLE from being used
+		x[i] = int64(i) + int64(rand.Intn(10))
+		enc.Write(x[i])
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		enc.Bytes()
+	}
+}
+
 type byteSetter interface {
 	SetBytes(b []byte)
 }
 
-func BenchmarkInt64Decoder(b *testing.B) {
+func BenchmarkInt64DecoderPackedSimple(b *testing.B) {
 	x := make([]int64, 1024)
-	enc := tsm1.NewInt64Encoder()
+	enc := NewInt64Encoder()
+	for i := 0; i < len(x); i++ {
+		// Small amount of randomness prevents RLE from being used
+		x[i] = int64(i) + int64(rand.Intn(10))
+		enc.Write(x[i])
+	}
+	bytes, _ := enc.Bytes()
+
+	b.ResetTimer()
+
+	dec := NewInt64Decoder(bytes)
+
+	for i := 0; i < b.N; i++ {
+		dec.(byteSetter).SetBytes(bytes)
+		for dec.Next() {
+		}
+	}
+}
+
+func BenchmarkInt64DecoderRLE(b *testing.B) {
+	x := make([]int64, 1024)
+	enc := NewInt64Encoder()
 	for i := 0; i < len(x); i++ {
 		x[i] = int64(i)
 		enc.Write(x[i])
@@ -239,7 +512,7 @@ func BenchmarkInt64Decoder(b *testing.B) {
 
 	b.ResetTimer()
 
-	dec := tsm1.NewInt64Decoder(bytes)
+	dec := NewInt64Decoder(bytes)
 
 	for i := 0; i < b.N; i++ {
 		dec.(byteSetter).SetBytes(bytes)
@@ -13,8 +13,10 @@ import (
 )
 
 const (
-	// stringUncompressed is a an uncompressed format encoding strings as raw bytes
+	// stringUncompressed is a an uncompressed format encoding strings as raw bytes.
+	// Not yet implemented.
 	stringUncompressed = 0
 
 	// stringCompressedSnappy is a compressed encoding using Snappy compression
 	stringCompressedSnappy = 1
 )
38 Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine/tsm1/string_test.go generated vendored
@@ -2,7 +2,9 @@ package tsm1
 
 import (
 	"fmt"
+	"reflect"
 	"testing"
+	"testing/quick"
 )
 
 func Test_StringEncoder_NoValues(t *testing.T) {
@ -83,3 +85,39 @@ func Test_StringEncoder_Multi_Compressed(t *testing.T) {
|
||||||
t.Fatalf("unexpected next value: got true, exp false")
|
t.Fatalf("unexpected next value: got true, exp false")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func Test_StringEncoder_Quick(t *testing.T) {
|
||||||
|
quick.Check(func(values []string) bool {
|
||||||
|
// Write values to encoder.
|
||||||
|
enc := NewStringEncoder()
|
||||||
|
for _, v := range values {
|
||||||
|
enc.Write(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retrieve encoded bytes from encoder.
|
||||||
|
buf, err := enc.Bytes()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read values out of decoder.
|
||||||
|
got := make([]string, 0, len(values))
|
||||||
|
dec, err := NewStringDecoder(buf)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
for dec.Next() {
|
||||||
|
if err := dec.Error(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
got = append(got, dec.Read())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify that input and output values match.
|
||||||
|
if !reflect.DeepEqual(values, got) {
|
||||||
|
t.Fatalf("mismatch:\n\nexp=%+v\n\ngot=%+v\n\n", values, got)
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}, nil)
|
||||||
|
}
|
||||||
|
|
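The new `Test_StringEncoder_Quick` is a property-based round-trip test: `testing/quick` generates random `[]string` inputs (100 cases by default when the config argument is nil) and the property asserts that decode(encode(x)) == x. When the default generator does not fit, a `quick.Config` can bound the case count or supply custom values, as the WAL test later in this diff does. A small hedged sketch of the configured form — the property function here is a stand-in, not part of the commit:

	cfg := &quick.Config{
		MaxCount: 10, // run 10 random cases instead of the default 100
		Values: func(values []reflect.Value, rand *rand.Rand) {
			values[0] = reflect.ValueOf(rand.Intn(1000))
		},
	}
	if err := quick.Check(func(n int) bool { return n >= 0 }, cfg); err != nil {
		t.Fatal(err)
	}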
Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine/tsm1/timestamp.go | 8 (generated, vendored)
@@ -56,7 +56,7 @@ type TimeEncoder interface {
 	Bytes() ([]byte, error)
 }

-// TimeEncoder decodes byte slices to time.Time values.
+// TimeDecoder decodes byte slices to time.Time values.
 type TimeDecoder interface {
 	Next() bool
 	Read() time.Time

@@ -124,7 +124,7 @@ func (e *encoder) Bytes() ([]byte, error) {
 	max, div, rle, dts := e.reduce()

 	// The deltas are all the same, so we can run-length encode them
-	if rle && len(e.ts) > 60 {
+	if rle && len(e.ts) > 1 {
 		return e.encodeRLE(e.ts[0], e.ts[1], div, len(e.ts))
 	}

@@ -264,7 +264,7 @@ func (d *decoder) decodeRLE(b []byte) {

 	// Lower 4 bits hold the 10 based exponent so we can scale the values back up
 	mod := int64(math.Pow10(int(b[i] & 0xF)))
-	i += 1
+	i++

 	// Next 8 bytes is the starting timestamp
 	first := binary.BigEndian.Uint64(b[i : i+8])

@@ -278,7 +278,7 @@ func (d *decoder) decodeRLE(b []byte) {
 	i += n

 	// Last 1-10 bytes is how many times the value repeats
-	count, n := binary.Uvarint(b[i:])
+	count, _ := binary.Uvarint(b[i:])

 	// Rebuild construct the original values now
 	deltas := make([]uint64, count)
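Two things happen in this file. The RLE threshold drops from runs of more than 60 equal deltas to more than 1 (`len(e.ts) > 1`), which is why the timestamp tests below now expect `timeCompressedRLE` where they previously expected `timeCompressedPackedSimple`. And the `decodeRLE` context implies the block layout the decoder assumes. A hedged sketch of that layout, reconstructed from the decode path only and not verified against the encoder:

	// decodeRLEHeader is illustrative, not the engine's actual function.
	// Layout implied by decodeRLE:
	//   byte 0:    high nibble = encoding type, low nibble = 10^n scale
	//   bytes 1-8: first timestamp (big-endian uint64)
	//   next 1-10: per-point delta (uvarint, stored scaled down by 10^n)
	//   next 1-10: repeat count (uvarint)
	func decodeRLEHeader(block []byte) (first, delta, count uint64, scale int64) {
		scale = int64(math.Pow10(int(block[0] & 0xF)))
		first = binary.BigEndian.Uint64(block[1:9])
		d, n := binary.Uvarint(block[9:])
		c, _ := binary.Uvarint(block[9+n:])
		return first, d, c, scale
	}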
Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine/tsm1/timestamp_test.go | 99 (generated, vendored)
@@ -1,7 +1,9 @@
 package tsm1

 import (
+	"reflect"
 	"testing"
+	"testing/quick"
 	"time"
 )

@@ -22,8 +24,8 @@ func Test_TimeEncoder(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}

-	if got := b[0] >> 4; got != timeCompressedPackedSimple {
-		t.Fatalf("Wrong encoding used: expected uncompressed, got %v", got)
+	if got := b[0] >> 4; got != timeCompressedRLE {
+		t.Fatalf("Wrong encoding used: expected rle, got %v", got)
 	}

 	dec := NewTimeDecoder(b)

@@ -87,8 +89,8 @@ func Test_TimeEncoder_Two(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}

-	if got := b[0] >> 4; got != timeCompressedPackedSimple {
-		t.Fatalf("Wrong encoding used: expected uncompressed, got %v", got)
+	if got := b[0] >> 4; got != timeCompressedRLE {
+		t.Fatalf("Wrong encoding used: expected rle, got %v", got)
 	}

 	dec := NewTimeDecoder(b)

@@ -113,7 +115,7 @@ func Test_TimeEncoder_Three(t *testing.T) {
 	enc := NewTimeEncoder()
 	t1 := time.Unix(0, 0)
 	t2 := time.Unix(0, 1)
-	t3 := time.Unix(0, 2)
+	t3 := time.Unix(0, 3)

 	enc.Write(t1)
 	enc.Write(t2)

@@ -125,7 +127,7 @@ func Test_TimeEncoder_Three(t *testing.T) {
 	}

 	if got := b[0] >> 4; got != timeCompressedPackedSimple {
-		t.Fatalf("Wrong encoding used: expected uncompressed, got %v", got)
+		t.Fatalf("Wrong encoding used: expected rle, got %v", got)
 	}

 	dec := NewTimeDecoder(b)

@@ -165,8 +167,8 @@ func Test_TimeEncoder_Large_Range(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}

-	if got := b[0] >> 4; got != timeCompressedPackedSimple {
-		t.Fatalf("Wrong encoding used: expected uncompressed, got %v", got)
+	if got := b[0] >> 4; got != timeCompressedRLE {
+		t.Fatalf("Wrong encoding used: expected rle, got %v", got)
 	}

 	dec := NewTimeDecoder(b)

@@ -283,7 +285,7 @@ func Test_TimeEncoder_Reverse(t *testing.T) {
 	ts := []time.Time{
 		time.Unix(0, 3),
 		time.Unix(0, 2),
-		time.Unix(0, 1),
+		time.Unix(0, 0),
 	}

 	for _, v := range ts {

@@ -305,7 +307,7 @@ func Test_TimeEncoder_Reverse(t *testing.T) {
 		if ts[i] != dec.Read() {
 			t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), ts[i])
 		}
-		i += 1
+		i++
 	}
 }

@@ -341,7 +343,7 @@ func Test_TimeEncoder_220SecondDelta(t *testing.T) {
 		if ts[i] != dec.Read() {
 			t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), ts[i])
 		}
-		i += 1
+		i++
 	}

 	if i != len(ts) {

@@ -353,6 +355,81 @@ func Test_TimeEncoder_220SecondDelta(t *testing.T) {
 	}
 }
+
+func Test_TimeEncoder_Quick(t *testing.T) {
+	quick.Check(func(values []int64) bool {
+		// Write values to encoder.
+		enc := NewTimeEncoder()
+		exp := make([]time.Time, len(values))
+		for i, v := range values {
+			exp[i] = time.Unix(0, v)
+			enc.Write(exp[i])
+		}
+
+		// Retrieve encoded bytes from encoder.
+		buf, err := enc.Bytes()
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		// Read values out of decoder.
+		got := make([]time.Time, 0, len(values))
+		dec := NewTimeDecoder(buf)
+		for dec.Next() {
+			if err := dec.Error(); err != nil {
+				t.Fatal(err)
+			}
+			got = append(got, dec.Read())
+		}
+
+		// Verify that input and output values match.
+		if !reflect.DeepEqual(exp, got) {
+			t.Fatalf("mismatch:\n\nexp=%+v\n\ngot=%+v\n\n", exp, got)
+		}
+
+		return true
+	}, nil)
+}
+
+func Test_TimeEncoder_RLESeconds(t *testing.T) {
+	enc := NewTimeEncoder()
+	ts := make([]time.Time, 6)
+
+	ts[0] = time.Unix(0, 1444448158000000000)
+	ts[1] = time.Unix(0, 1444448168000000000)
+	ts[2] = time.Unix(0, 1444448178000000000)
+	ts[3] = time.Unix(0, 1444448188000000000)
+	ts[4] = time.Unix(0, 1444448198000000000)
+	ts[5] = time.Unix(0, 1444448208000000000)
+
+	for _, v := range ts {
+		enc.Write(v)
+	}
+
+	b, err := enc.Bytes()
+	if got := b[0] >> 4; got != timeCompressedRLE {
+		t.Fatalf("Wrong encoding used: expected rle, got %v", got)
+	}
+
+	if err != nil {
+		t.Fatalf("unexpected error: %v", err)
+	}
+
+	dec := NewTimeDecoder(b)
+	for i, v := range ts {
+		if !dec.Next() {
+			t.Fatalf("Next == false, expected true")
+		}
+
+		if v != dec.Read() {
+			t.Fatalf("Item %d mismatch, got %v, exp %v", i, dec.Read(), v)
+		}
+	}
+
+	if dec.Next() {
+		t.Fatalf("unexpected extra values")
+	}
+
+}
 func BenchmarkTimeEncoder(b *testing.B) {
 	enc := NewTimeEncoder()
 	x := make([]time.Time, 1024)
@@ -161,7 +161,7 @@ func NewEngine(path string, walPath string, opt tsdb.EngineOptions) tsdb.Engine
 		MaxPointsPerBlock: DefaultMaxPointsPerBlock,
 		RotateBlockSize:   DefaultRotateBlockSize,
 	}
-	e.WAL.Index = e
+	e.WAL.IndexWriter = e

 	return e
 }

@@ -313,7 +313,7 @@ func (e *Engine) LoadMetadataIndex(shard *tsdb.Shard, index *tsdb.DatabaseIndex,
 	}
 	for k, mf := range fields {
 		m := index.CreateMeasurementIndexIfNotExists(string(k))
-		for name, _ := range mf.Fields {
+		for name := range mf.Fields {
 			m.SetFieldName(name)
 		}
 		mf.Codec = tsdb.NewFieldCodec(mf.Fields)

@@ -329,7 +329,7 @@ func (e *Engine) LoadMetadataIndex(shard *tsdb.Shard, index *tsdb.DatabaseIndex,
 	// Load the series into the in-memory index in sorted order to ensure
 	// it's always consistent for testing purposes
 	a := make([]string, 0, len(series))
-	for k, _ := range series {
+	for k := range series {
 		a = append(a, k)
 	}
 	sort.Strings(a)
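Many hunks in this file are the same lint-driven cleanup: when only the key of a map is needed, `for k, _ := range m` becomes `for k := range m`, and `i += 1` becomes `i++`. Both forms compile identically; the shorter ones are what `golint` expects (the changelog entry about enabling golint on the code base is the motivation). A quick illustration, not from the commit:

	m := map[string]int{"a": 1, "b": 2}
	for k, _ := range m { // golint: "should omit 2nd value from range"
		fmt.Println(k)
	}
	for k := range m { // lint-clean equivalent
		fmt.Println(k)
	}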
@@ -357,7 +357,7 @@ func (e *Engine) Write(pointsByKey map[string]Values, measurementFieldsToSave ma
 		e.flushDeletes()
 	}

-	err, startTime, endTime, valuesByID := e.convertKeysAndWriteMetadata(pointsByKey, measurementFieldsToSave, seriesToCreate)
+	startTime, endTime, valuesByID, err := e.convertKeysAndWriteMetadata(pointsByKey, measurementFieldsToSave, seriesToCreate)
 	if err != nil {
 		return err
 	}

@@ -576,8 +576,8 @@ func (e *Engine) Compact(fullCompaction bool) error {
 		positions[i] = 4
 	}
 	currentPosition := uint32(fileHeaderSize)
-	newPositions := make([]uint32, 0)
-	newIDs := make([]uint64, 0)
+	var newPositions []uint32
+	var newIDs []uint64
 	buf := make([]byte, e.RotateBlockSize)
 	for {
 		// find the min ID so we can write it to the file

@@ -614,7 +614,11 @@ func (e *Engine) Compact(fullCompaction bool) error {
 		for {
 			// write the values, the block or combine with previous
 			if len(previousValues) > 0 {
-				previousValues = append(previousValues, previousValues.DecodeSameTypeBlock(block)...)
+				decoded, err := DecodeBlock(block)
+				if err != nil {
+					panic(fmt.Sprintf("failure decoding block: %v", err))
+				}
+				previousValues = append(previousValues, decoded...)
 			} else if len(block) > e.RotateBlockSize {
 				if _, err := f.Write(df.mmap[pos:newPos]); err != nil {
 					return err
@@ -804,30 +808,30 @@ func (e *Engine) filesToCompact() dataFiles {
 	return a
 }

-func (e *Engine) convertKeysAndWriteMetadata(pointsByKey map[string]Values, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) (err error, minTime, maxTime int64, valuesByID map[uint64]Values) {
+func (e *Engine) convertKeysAndWriteMetadata(pointsByKey map[string]Values, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) (minTime, maxTime int64, valuesByID map[uint64]Values, err error) {
 	e.metaLock.Lock()
 	defer e.metaLock.Unlock()

 	if err := e.writeNewFields(measurementFieldsToSave); err != nil {
-		return err, 0, 0, nil
+		return 0, 0, nil, err
 	}
 	if err := e.writeNewSeries(seriesToCreate); err != nil {
-		return err, 0, 0, nil
+		return 0, 0, nil, err
 	}

 	if len(pointsByKey) == 0 {
-		return nil, 0, 0, nil
+		return 0, 0, nil, nil
 	}

 	// read in keys and assign any that aren't defined
 	b, err := e.readCompressedFile(IDsFileExtension)
 	if err != nil {
-		return err, 0, 0, nil
+		return 0, 0, nil, err
 	}
 	ids := make(map[string]uint64)
 	if b != nil {
 		if err := json.Unmarshal(b, &ids); err != nil {
-			return err, 0, 0, nil
+			return 0, 0, nil, err
 		}
 	}

@@ -888,10 +892,10 @@ func (e *Engine) convertKeysAndWriteMetadata(pointsByKey map[string]Values, meas
 	if newKeys {
 		b, err := json.Marshal(ids)
 		if err != nil {
-			return err, 0, 0, nil
+			return 0, 0, nil, err
 		}
 		if err := e.replaceCompressedFile(IDsFileExtension, b); err != nil {
-			return err, 0, 0, nil
+			return 0, 0, nil, err
 		}
 	}
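The signature change above moves `err` from the first return value to the last, which is the position Go's conventions (and golint) expect, and every `return` site is reordered to match. A tiny hedged illustration of the convention, unrelated to the engine types:

	// Conventional: results first, error last. Callers then keep the
	// familiar `v, err :=` shape at every call site.
	func parsePort(s string) (int, error) {
		p, err := strconv.Atoi(s)
		if err != nil {
			return 0, fmt.Errorf("bad port %q: %v", s, err)
		}
		return p, nil
	}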
@@ -989,7 +993,7 @@ func (e *Engine) rewriteFile(oldDF *dataFile, valuesByID map[uint64]Values) erro
 	// we need the values in sorted order so that we can merge them into the
 	// new file as we read the old file
 	ids := make([]uint64, 0, len(valuesByID))
-	for id, _ := range valuesByID {
+	for id := range valuesByID {
 		ids = append(ids, id)
 	}

@@ -1015,7 +1019,7 @@ func (e *Engine) rewriteFile(oldDF *dataFile, valuesByID map[uint64]Values) erro
 	}

 	// add any ids that are in the file that aren't getting flushed here
-	for id, _ := range oldIDToPosition {
+	for id := range oldIDToPosition {
 		if _, ok := valuesByID[id]; !ok {
 			ids = append(ids, id)
 		}

@@ -1065,7 +1069,7 @@ func (e *Engine) rewriteFile(oldDF *dataFile, valuesByID map[uint64]Values) erro
 		currentPosition += (12 + length)

 		// make sure we're not at the end of the file
-		if fpos >= oldDF.size {
+		if fpos >= oldDF.indexPosition() {
 			break
 		}
 	}

@@ -1191,7 +1195,7 @@ func (e *Engine) flushDeletes() error {
 	measurements := make(map[string]bool)
 	deletes := make(map[uint64]string)
 	e.filesLock.RLock()
-	for name, _ := range e.deleteMeasurements {
+	for name := range e.deleteMeasurements {
 		measurements[name] = true
 	}
 	for id, key := range e.deletes {

@@ -1205,7 +1209,7 @@ func (e *Engine) flushDeletes() error {
 	if err != nil {
 		return err
 	}
-	for name, _ := range measurements {
+	for name := range measurements {
 		delete(fields, name)
 	}
 	if err := e.writeFields(fields); err != nil {

@@ -1239,10 +1243,10 @@ func (e *Engine) flushDeletes() error {
 	e.files = newFiles

 	// remove the things we've deleted from the map
-	for name, _ := range measurements {
+	for name := range measurements {
 		delete(e.deleteMeasurements, name)
 	}
-	for id, _ := range deletes {
+	for id := range deletes {
 		delete(e.deletes, id)
 	}

@@ -1264,8 +1268,8 @@ func (e *Engine) writeNewFileExcludeDeletes(oldDF *dataFile) *dataFile {
 		panic(fmt.Sprintf("error opening new data file: %s", err.Error()))
 	}

-	ids := make([]uint64, 0)
-	positions := make([]uint32, 0)
+	var ids []uint64
+	var positions []uint32

 	indexPosition := oldDF.indexPosition()
 	currentPosition := uint32(fileHeaderSize)

@@ -1350,7 +1354,7 @@ func (e *Engine) keysWithFields(fields map[string]*tsdb.MeasurementFields, keys
 	e.WAL.cacheLock.RLock()
 	defer e.WAL.cacheLock.RUnlock()

-	a := make([]string, 0)
+	var a []string
 	for _, k := range keys {
 		measurement := tsdb.MeasurementFromSeriesKey(k)

@@ -1645,7 +1649,10 @@ func (e *Engine) readSeries() (map[string]*tsdb.Series, error) {
 // has future encoded blocks so that this method can know how much of its values can be
 // combined and output in the resulting encoded block.
 func (e *Engine) DecodeAndCombine(newValues Values, block, buf []byte, nextTime int64, hasFutureBlock bool) (Values, []byte, error) {
-	values := newValues.DecodeSameTypeBlock(block)
+	values, err := DecodeBlock(block)
+	if err != nil {
+		panic(fmt.Sprintf("failure decoding block: %v", err))
+	}

 	var remainingValues Values
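Both decode sites in this file drop `Values.DecodeSameTypeBlock`, which assumed the block held the same value type as the receiver, in favor of a standalone `DecodeBlock` that returns the decoded values plus an error; for now a decode failure still panics at these call sites rather than propagating. A hedged sketch of a caller that prefers to propagate instead — illustrative, not part of the commit:

	func combine(prev Values, block []byte) (Values, error) {
		decoded, err := DecodeBlock(block)
		if err != nil {
			return nil, fmt.Errorf("decoding block: %v", err)
		}
		return append(prev, decoded...), nil
	}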
@@ -1855,7 +1862,7 @@ func (d *dataFile) MaxTime() int64 {
 }

 func (d *dataFile) SeriesCount() uint32 {
-	return btou32(d.mmap[d.size-4:])
+	return btou32(d.mmap[d.size-seriesCountSize:])
 }

 func (d *dataFile) IDToPosition() map[uint64]uint32 {
Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine/tsm1/tsm1_test.go | 108 (generated, vendored)
@@ -18,7 +18,7 @@ import (

 func TestEngine_WriteAndReadFloats(t *testing.T) {
 	e := OpenDefaultEngine()
-	defer e.Cleanup()
+	defer e.Close()

 	p1 := parsePoint("cpu,host=A value=1.1 1000000000")
 	p2 := parsePoint("cpu,host=B value=1.2 1000000000")

@@ -64,7 +64,7 @@ func TestEngine_WriteAndReadFloats(t *testing.T) {
 	}

 	if checkSingleBVal {
-		k, v = c.Next()
+		k, _ = c.Next()
 		if k != tsdb.EOF {
 			t.Fatal("expected EOF")
 		}

@@ -113,7 +113,7 @@ func TestEngine_WriteAndReadFloats(t *testing.T) {
 	}
 	tx.Rollback()

-	if err := e.Close(); err != nil {
+	if err := e.Engine.Close(); err != nil {
 		t.Fatalf("error closing: %s", err.Error())
 	}

@@ -129,7 +129,7 @@ func TestEngine_WriteIndexWithCollision(t *testing.T) {

 func TestEngine_WriteIndexQueryAcrossDataFiles(t *testing.T) {
 	e := OpenDefaultEngine()
-	defer e.Cleanup()
+	defer e.Close()

 	e.RotateFileSize = 10

@@ -191,7 +191,7 @@ func TestEngine_WriteIndexQueryAcrossDataFiles(t *testing.T) {

 func TestEngine_WriteOverwritePreviousPoint(t *testing.T) {
 	e := OpenDefaultEngine()
-	defer e.Cleanup()
+	defer e.Close()

 	fields := []string{"value"}

@@ -232,7 +232,7 @@ func TestEngine_WriteOverwritePreviousPoint(t *testing.T) {
 	if 1.3 != v {
 		t.Fatalf("data wrong:\n\texp:%f\n\tgot:%f", 1.3, v.(float64))
 	}
-	k, v = c.Next()
+	k, _ = c.Next()
 	if k != tsdb.EOF {
 		t.Fatal("expected EOF")
 	}

@@ -240,7 +240,7 @@ func TestEngine_WriteOverwritePreviousPoint(t *testing.T) {

 func TestEngine_CursorCombinesWALAndIndex(t *testing.T) {
 	e := OpenDefaultEngine()
-	defer e.Cleanup()
+	defer e.Close()

 	fields := []string{"value"}

@@ -272,7 +272,7 @@ func TestEngine_CursorCombinesWALAndIndex(t *testing.T) {
 	if 1.2 != v {
 		t.Fatalf("data wrong:\n\texp:%f\n\tgot:%f", 1.2, v.(float64))
 	}
-	k, v = c.Next()
+	k, _ = c.Next()
 	if k != tsdb.EOF {
 		t.Fatal("expected EOF")
 	}

@@ -280,7 +280,7 @@ func TestEngine_CursorCombinesWALAndIndex(t *testing.T) {

 func TestEngine_Compaction(t *testing.T) {
 	e := OpenDefaultEngine()
-	defer e.Cleanup()
+	defer e.Close()

 	e.RotateFileSize = 10

@@ -348,7 +348,7 @@ func TestEngine_Compaction(t *testing.T) {

 	verify("cpu,host=A", []models.Point{p1, p3, p5, p7}, 0)
 	verify("cpu,host=B", []models.Point{p2, p4, p6, p8}, 0)
-	if err := e.Close(); err != nil {
+	if err := e.Engine.Close(); err != nil {
 		t.Fatalf("error closing: %s", err.Error())
 	}
 	if err := e.Open(); err != nil {

@@ -361,7 +361,7 @@ func TestEngine_Compaction(t *testing.T) {
 // Ensure that if two keys have the same fnv64-a id, we handle it
 func TestEngine_KeyCollisionsAreHandled(t *testing.T) {
 	e := OpenDefaultEngine()
-	defer e.Cleanup()
+	defer e.Close()

 	fields := []string{"value"}

@@ -416,7 +416,7 @@ func TestEngine_KeyCollisionsAreHandled(t *testing.T) {
 	verify("cpu,host=C", []models.Point{p3, p6}, 0)

 	// verify collisions are handled after closing and reopening
-	if err := e.Close(); err != nil {
+	if err := e.Engine.Close(); err != nil {
 		t.Fatalf("error closing: %s", err.Error())
 	}
 	if err := e.Open(); err != nil {

@@ -442,7 +442,7 @@ func TestEngine_KeyCollisionsAreHandled(t *testing.T) {

 func TestEngine_SupportMultipleFields(t *testing.T) {
 	e := OpenDefaultEngine()
-	defer e.Cleanup()
+	defer e.Close()

 	fields := []string{"value", "foo"}

@@ -605,7 +605,7 @@ func TestEngine_SupportMultipleFields(t *testing.T) {

 func TestEngine_WriteManyPointsToSingleSeries(t *testing.T) {
 	e := OpenDefaultEngine()
-	defer e.Cleanup()
+	defer e.Close()

 	fields := []string{"value"}

@@ -641,7 +641,7 @@ func TestEngine_WriteManyPointsToSingleSeries(t *testing.T) {

 func TestEngine_WritePointsInMultipleRequestsWithSameTime(t *testing.T) {
 	e := OpenDefaultEngine()
-	defer e.Cleanup()
+	defer e.Close()

 	fields := []string{"value"}

@@ -676,7 +676,7 @@ func TestEngine_WritePointsInMultipleRequestsWithSameTime(t *testing.T) {

 	verify()

-	if err := e.Close(); err != nil {
+	if err := e.Engine.Close(); err != nil {
 		t.Fatalf("error closing: %s", err.Error())
 	}
 	if err := e.Open(); err != nil {

@@ -688,7 +688,7 @@ func TestEngine_WritePointsInMultipleRequestsWithSameTime(t *testing.T) {

 func TestEngine_CursorDescendingOrder(t *testing.T) {
 	e := OpenDefaultEngine()
-	defer e.Cleanup()
+	defer e.Close()

 	fields := []string{"value"}

@@ -763,7 +763,7 @@ func TestEngine_CursorDescendingOrder(t *testing.T) {

 func TestEngine_CompactWithSeriesInOneFile(t *testing.T) {
 	e := OpenDefaultEngine()
-	defer e.Cleanup()
+	defer e.Close()

 	fields := []string{"value"}

@@ -844,7 +844,7 @@ func TestEngine_CompactWithSeriesInOneFile(t *testing.T) {
 	if k != 3000000000 {
 		t.Fatalf("expected time 3000000000 but got %d", k)
 	}
-	k, v = c.Next()
+	k, _ = c.Next()
 	if k != 4000000000 {
 		t.Fatalf("expected time 3000000000 but got %d", k)
 	}
@@ -854,7 +854,7 @@ func TestEngine_CompactWithSeriesInOneFile(t *testing.T) {
 // skip decoding and just get copied over to the new data file works.
 func TestEngine_CompactionWithCopiedBlocks(t *testing.T) {
 	e := OpenDefaultEngine()
-	defer e.Cleanup()
+	defer e.Close()

 	fields := []string{"value"}

@@ -932,7 +932,7 @@ func TestEngine_CompactionWithCopiedBlocks(t *testing.T) {

 func TestEngine_RewritingOldBlocks(t *testing.T) {
 	e := OpenDefaultEngine()
-	defer e.Cleanup()
+	defer e.Close()

 	fields := []string{"value"}

@@ -976,7 +976,7 @@ func TestEngine_RewritingOldBlocks(t *testing.T) {

 func TestEngine_WriteIntoCompactedFile(t *testing.T) {
 	e := OpenDefaultEngine()
-	defer e.Cleanup()
+	defer e.Close()

 	fields := []string{"value"}

@@ -1043,7 +1043,7 @@ func TestEngine_WriteIntoCompactedFile(t *testing.T) {

 func TestEngine_DuplicatePointsInWalAndIndex(t *testing.T) {
 	e := OpenDefaultEngine()
-	defer e.Cleanup()
+	defer e.Close()

 	fields := []string{"value"}
 	p1 := parsePoint("cpu,host=A value=1.1 1000000000")

@@ -1073,7 +1073,7 @@ func TestEngine_DuplicatePointsInWalAndIndex(t *testing.T) {

 func TestEngine_Deletes(t *testing.T) {
 	e := OpenDefaultEngine()
-	defer e.Cleanup()
+	defer e.Close()

 	fields := []string{"value"}
 	// Create metadata.

@@ -1153,7 +1153,7 @@ func TestEngine_Deletes(t *testing.T) {
 	// the wal flushes to the index. To verify that the delete gets
 	// persisted and will go all the way through the index

-	if err := e.Close(); err != nil {
+	if err := e.Engine.Close(); err != nil {
 		t.Fatalf("error closing: %s", err.Error())
 	}
 	if err := e.Open(); err != nil {

@@ -1179,7 +1179,7 @@ func TestEngine_Deletes(t *testing.T) {
 	verify()

 	// open and close to verify thd delete was persisted
-	if err := e.Close(); err != nil {
+	if err := e.Engine.Close(); err != nil {
 		t.Fatalf("error closing: %s", err.Error())
 	}
 	if err := e.Open(); err != nil {

@@ -1218,7 +1218,7 @@ func TestEngine_Deletes(t *testing.T) {
 	}()

 	// open and close to verify thd delete was persisted
-	if err := e.Close(); err != nil {
+	if err := e.Engine.Close(); err != nil {
 		t.Fatalf("error closing: %s", err.Error())
 	}
 	if err := e.Open(); err != nil {

@@ -1238,7 +1238,7 @@ func TestEngine_Deletes(t *testing.T) {

 func TestEngine_IndexGoodAfterFlush(t *testing.T) {
 	e := OpenDefaultEngine()
-	defer e.Cleanup()
+	defer e.Close()

 	fields := []string{"value"}

@@ -1305,6 +1305,54 @@ func TestEngine_IndexGoodAfterFlush(t *testing.T) {
 	verify()
 }
+
+// Ensure that when rewriting an index file with values in a
+// series not in the file doesn't cause corruption on compaction
+func TestEngine_RewriteFileAndCompact(t *testing.T) {
+	e := OpenDefaultEngine()
+	defer e.Engine.Close()
+
+	fields := []string{"value"}
+
+	e.RotateFileSize = 10
+
+	p1 := parsePoint("cpu,host=A value=1.1 1000000000")
+	p2 := parsePoint("cpu,host=A value=1.2 2000000000")
+	p3 := parsePoint("cpu,host=A value=1.3 3000000000")
+	p4 := parsePoint("cpu,host=A value=1.5 4000000000")
+	p5 := parsePoint("cpu,host=A value=1.6 5000000000")
+	p6 := parsePoint("cpu,host=B value=2.1 2000000000")
+
+	if err := e.WritePoints([]models.Point{p1, p2}, nil, nil); err != nil {
+		t.Fatalf("failed to write points: %s", err.Error())
+	}
+	if err := e.WritePoints([]models.Point{p3}, nil, nil); err != nil {
+		t.Fatalf("failed to write points: %s", err.Error())
+	}
+
+	if err := e.WritePoints([]models.Point{p4, p5, p6}, nil, nil); err != nil {
+		t.Fatalf("failed to write points: %s", err.Error())
+	}
+
+	if err := e.Compact(true); err != nil {
+		t.Fatalf("error compacting: %s", err.Error())
+	}
+
+	func() {
+		tx, _ := e.Begin(false)
+		defer tx.Rollback()
+		c := tx.Cursor("cpu,host=A", fields, nil, true)
+		k, _ := c.SeekTo(0)
+		if k != p1.UnixNano() {
+			t.Fatalf("wrong time %d", k)
+		}
+		c = tx.Cursor("cpu,host=B", fields, nil, true)
+		k, _ = c.SeekTo(0)
+		if k != p6.UnixNano() {
+			t.Fatalf("wrong time %d", k)
+		}
+	}()
+}
|
// Engine represents a test wrapper for tsm1.Engine.
|
||||||
type Engine struct {
|
type Engine struct {
|
||||||
*tsm1.Engine
|
*tsm1.Engine
|
||||||
|
@ -1339,8 +1387,8 @@ func OpenEngine(opt tsdb.EngineOptions) *Engine {
|
||||||
// OpenDefaultEngine returns an open Engine with default options.
|
// OpenDefaultEngine returns an open Engine with default options.
|
||||||
func OpenDefaultEngine() *Engine { return OpenEngine(tsdb.NewEngineOptions()) }
|
func OpenDefaultEngine() *Engine { return OpenEngine(tsdb.NewEngineOptions()) }
|
||||||
|
|
||||||
// Cleanup closes the engine and removes all data.
|
// Close closes the engine and removes all data.
|
||||||
func (e *Engine) Cleanup() error {
|
func (e *Engine) Close() error {
|
||||||
e.Engine.Close()
|
e.Engine.Close()
|
||||||
os.RemoveAll(e.Path())
|
os.RemoveAll(e.Path())
|
||||||
return nil
|
return nil
|
||||||
|
|
|
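The renamed wrapper method explains the pattern running through the test changes above: the test `Engine` embeds `*tsm1.Engine`, and its own `Close` now shadows the embedded one, closing the engine and deleting its data directory. Tests that only want teardown call `defer e.Close()`; tests that need to close and reopen without losing data reach past the shadow with `e.Engine.Close()`. A minimal sketch of the shadowing mechanics, with hypothetical names:

	// Inner stands in for an embedded type with its own Close.
	type Inner struct{}

	func (Inner) Close() error { return nil }

	// Wrapper shadows Close and adds extra teardown.
	type Wrapper struct {
		Inner
		dir string
	}

	func (w *Wrapper) Close() error {
		w.Inner.Close()            // the embedded Close stays reachable
		return os.RemoveAll(w.dir) // the shadow's added cleanup
	}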
@@ -33,8 +33,8 @@ func (t *tx) Cursor(series string, fields []string, dec *tsdb.FieldCodec, ascend

 	// multiple fields. use just the MultiFieldCursor, which also handles time collisions
 	// so we don't need to use the combined cursor
-	cursors := make([]tsdb.Cursor, 0)
-	cursorFields := make([]string, 0)
+	var cursors []tsdb.Cursor
+	var cursorFields []string
 	for _, field := range fields {
 		id := t.engine.keyAndFieldToID(series, field)
 		_, isDeleted := t.engine.deletes[id]
@@ -44,8 +44,6 @@ const (
 	// idleFlush indicates that we should flush all series in the parition,
 	// delete all segment files and hold off on opening a new one
 	idleFlush
-	// deleteFlush indicates that we're flushing because series need to be removed from the WAL
-	deleteFlush
 	// startupFlush indicates that we're flushing because the database is starting up
 	startupFlush
 )

@@ -63,9 +61,6 @@
 type Log struct {
 	path string

-	flushCheckTimer    *time.Timer // check this often to see if a background flush should happen
-	flushCheckInterval time.Duration
-
 	// write variables
 	writeLock        sync.Mutex
 	currentSegmentID int

@@ -100,8 +95,8 @@ type Log struct {
 	// MaxMemorySizeThreshold specifies the limit at which writes to the WAL should be rejected
 	MaxMemorySizeThreshold int

-	// Index is the database series will be flushed to
-	Index IndexWriter
+	// IndexWriter is the database series will be flushed to
+	IndexWriter IndexWriter

 	// LoggingEnabled specifies if detailed logs should be output
 	LoggingEnabled bool

@@ -136,6 +131,9 @@ func NewLog(path string) *Log {
 		}
 	}

+// Path returns the path the log was initialized with.
+func (l *Log) Path() string { return l.path }
+
 // Open opens and initializes the Log. Will recover from previous unclosed shutdowns
 func (l *Log) Open() error {

@@ -383,7 +381,7 @@ func (l *Log) readFileToCache(fileName string) error {
 			}
 			l.addToCache(nil, fields, nil, false)
 		case seriesEntry:
-			series := make([]*tsdb.SeriesCreate, 0)
+			var series []*tsdb.SeriesCreate
 			if err := json.Unmarshal(data, &series); err != nil {
 				return err
 			}

@@ -393,8 +391,8 @@ func (l *Log) readFileToCache(fileName string) error {
 			if err := json.Unmarshal(data, &d); err != nil {
 				return err
 			}
-			l.Index.MarkDeletes(d.Keys)
-			l.Index.MarkMeasurementDelete(d.MeasurementName)
+			l.IndexWriter.MarkDeletes(d.Keys)
+			l.IndexWriter.MarkMeasurementDelete(d.MeasurementName)
 			l.deleteKeysFromCache(d.Keys)
 			if d.MeasurementName != "" {
 				l.deleteMeasurementFromCache(d.MeasurementName)

@@ -505,28 +503,11 @@ func (l *Log) Close() error {
 	l.cache = nil
 	l.measurementFieldsCache = nil
 	l.seriesToCreateCache = nil
-	if l.currentSegmentFile == nil {
-		return nil
-	}
-	if err := l.currentSegmentFile.Close(); err != nil {
-		return err
-	}
-	l.currentSegmentFile = nil
-
-	return nil
-}
-
-// close all the open Log partitions and file handles
-func (l *Log) close() error {
-	l.cache = nil
-	l.cacheDirtySort = nil
-	if l.currentSegmentFile == nil {
-		return nil
-	}
-	if err := l.currentSegmentFile.Close(); err != nil {
-		return err
-	}
-	l.currentSegmentFile = nil
+	if l.currentSegmentFile != nil {
+		l.currentSegmentFile.Close()
+		l.currentSegmentFile = nil
+	}

 	return nil
 }

@@ -578,7 +559,7 @@ func (l *Log) flush(flush flushType) error {
 		valueCount += len(v)
 	}
 	l.cache = make(map[string]Values)
-	for k, _ := range l.cacheDirtySort {
+	for k := range l.cacheDirtySort {
 		l.flushCache[k] = l.flushCache[k].Deduplicate()
 	}
 	l.cacheDirtySort = make(map[string]bool)

@@ -614,7 +595,7 @@ func (l *Log) flush(flush flushType) error {
 	}

 	startTime := time.Now()
-	if err := l.Index.Write(l.flushCache, mfc, scc); err != nil {
+	if err := l.IndexWriter.Write(l.flushCache, mfc, scc); err != nil {
 		return err
 	}
 	if l.LoggingEnabled {

@@ -658,7 +639,7 @@ func (l *Log) segmentFileNames() ([]string, error) {

 // newSegmentFile will close the current segment file and open a new one, updating bookkeeping info on the log
 func (l *Log) newSegmentFile() error {
-	l.currentSegmentID += 1
+	l.currentSegmentID++
 	if l.currentSegmentFile != nil {
 		if err := l.currentSegmentFile.Close(); err != nil {
 			return err
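The `Index` → `IndexWriter` rename highlights the indirection the WAL already relied on: flushes go through an interface rather than a concrete index, so the tests below can inject a fake. A sketch of that interface, with the method set taken from the mock in the test file — treat it as illustrative rather than a verified copy of the engine's declaration:

	type IndexWriter interface {
		Write(valuesByKey map[string]Values, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error
		MarkDeletes(keys []string)
		MarkMeasurementDelete(name string)
	}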
Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine/tsm1/wal_test.go | 305 (generated, vendored)
@@ -2,30 +2,35 @@ package tsm1_test

 import (
 	"io/ioutil"
+	"math/rand"
 	"os"
 	"reflect"
+	"sort"
+	"strconv"
+	"sync"
 	"testing"
+	"testing/quick"
+	"time"

+	"github.com/influxdb/influxdb/influxql"
 	"github.com/influxdb/influxdb/models"
 	"github.com/influxdb/influxdb/tsdb"
 	"github.com/influxdb/influxdb/tsdb/engine/tsm1"
 )

-func TestWAL_TestWriteQueryOpen(t *testing.T) {
-	w := NewWAL()
-	defer w.Cleanup()
+func TestLog_TestWriteQueryOpen(t *testing.T) {
+	w := NewLog()
+	defer w.Close()

+	// Mock call to the index.
 	var vals map[string]tsm1.Values
 	var fields map[string]*tsdb.MeasurementFields
 	var series []*tsdb.SeriesCreate
-	w.Index = &MockIndexWriter{
-		fn: func(valuesByKey map[string]tsm1.Values, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error {
-			vals = valuesByKey
-			fields = measurementFieldsToSave
-			series = seriesToCreate
-			return nil
-		},
+	w.IndexWriter.WriteFn = func(valuesByKey map[string]tsm1.Values, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error {
+		vals = valuesByKey
+		fields = measurementFieldsToSave
+		series = seriesToCreate
+		return nil
 	}

 	if err := w.Open(); err != nil {
@@ -103,7 +108,7 @@ func TestWAL_TestWriteQueryOpen(t *testing.T) {
 	}

 	// ensure we close and after open it flushes to the index
-	if err := w.Close(); err != nil {
+	if err := w.Log.Close(); err != nil {
 		t.Fatalf("failed to close: %s", err.Error())
 	}
@ -140,39 +145,277 @@ func TestWAL_TestWriteQueryOpen(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
type Log struct {
|
// Ensure the log can handle random data.
|
||||||
*tsm1.Log
|
func TestLog_Quick(t *testing.T) {
|
||||||
path string
|
if testing.Short() {
|
||||||
|
t.Skip("short mode")
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewWAL() *Log {
|
quick.Check(func(pointsSlice PointsSlice) bool {
|
||||||
dir, err := ioutil.TempDir("", "tsm1-test")
|
l := NewLog()
|
||||||
if err != nil {
|
l.FlushMemorySizeThreshold = 4096 // low threshold
|
||||||
panic("couldn't get temp dir")
|
defer l.Close()
|
||||||
|
|
||||||
|
var mu sync.Mutex
|
||||||
|
index := make(map[string]tsm1.Values)
|
||||||
|
|
||||||
|
// Ignore flush to the index.
|
||||||
|
l.IndexWriter.WriteFn = func(valuesByKey map[string]tsm1.Values, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error {
|
||||||
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
|
for key, values := range valuesByKey {
|
||||||
|
index[key] = append(index[key], values...)
|
||||||
}
|
}
|
||||||
|
|
||||||
l := &Log{
|
// Simulate slow index writes.
|
||||||
Log: tsm1.NewLog(dir),
|
time.Sleep(100 * time.Millisecond)
|
||||||
path: dir,
|
|
||||||
}
|
|
||||||
l.LoggingEnabled = true
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *Log) Cleanup() error {
|
|
||||||
l.Close()
|
|
||||||
os.RemoveAll(l.path)
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type MockIndexWriter struct {
|
// Open the log.
|
||||||
fn func(valuesByKey map[string]tsm1.Values, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error
|
if err := l.Open(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MockIndexWriter) Write(valuesByKey map[string]tsm1.Values, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error {
|
// Generate fields and series to create.
|
||||||
return m.fn(valuesByKey, measurementFieldsToSave, seriesToCreate)
|
fieldsToWrite := pointsSlice.MeasurementFields()
|
||||||
|
seriesToWrite := pointsSlice.SeriesCreate()
|
||||||
|
|
||||||
|
// Write each set of points separately.
|
||||||
|
for _, points := range pointsSlice {
|
||||||
|
if err := l.WritePoints(points.Encode(), fieldsToWrite, seriesToWrite); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MockIndexWriter) MarkDeletes(keys []string) {}
|
// Iterate over each series and read out cursor.
|
||||||
|
for _, series := range pointsSlice.Series() {
|
||||||
|
mu.Lock()
|
||||||
|
if got := mergeIndexCursor(series, l, index); !reflect.DeepEqual(got, series.Values) {
|
||||||
|
t.Fatalf("mismatch:\n\ngot=%v\n\nexp=%v\n\n", len(got), len(series.Values))
|
||||||
|
}
|
||||||
|
mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
func (m *MockIndexWriter) MarkMeasurementDelete(name string) {}
|
// Reopen log.
|
||||||
|
if err := l.Reopen(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Iterate over each series and read out cursor again.
|
||||||
|
for _, series := range pointsSlice.Series() {
|
||||||
|
mu.Lock()
|
||||||
|
if got := mergeIndexCursor(series, l, index); !reflect.DeepEqual(got, series.Values) {
|
||||||
|
t.Fatalf("mismatch(reopen):\n\ngot=%v\n\nexp=%v\n\n", len(got), len(series.Values))
|
||||||
|
}
|
||||||
|
mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}, &quick.Config{
|
||||||
|
MaxCount: 10,
|
||||||
|
Values: func(values []reflect.Value, rand *rand.Rand) {
|
||||||
|
values[0] = reflect.ValueOf(GeneratePointsSlice(rand))
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func mergeIndexCursor(series *Series, l *Log, index map[string]tsm1.Values) tsm1.Values {
|
||||||
|
c := l.Cursor(series.Name, series.FieldsSlice(), &tsdb.FieldCodec{}, true)
|
||||||
|
a := ReadAllCursor(c)
|
||||||
|
a = append(index[series.Name+"#!~#value"], a...)
|
||||||
|
a = DedupeValues(a)
|
||||||
|
sort.Sort(a)
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
|
||||||
|
type Log struct {
|
||||||
|
*tsm1.Log
|
||||||
|
IndexWriter IndexWriter
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewLog returns a new instance of Log
|
||||||
|
func NewLog() *Log {
|
||||||
|
path, err := ioutil.TempDir("", "tsm1-test")
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
l := &Log{Log: tsm1.NewLog(path)}
|
||||||
|
l.Log.IndexWriter = &l.IndexWriter
|
||||||
|
l.LoggingEnabled = true
|
||||||
|
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes the log and removes the underlying temporary path.
|
||||||
|
func (l *Log) Close() error {
|
||||||
|
defer os.RemoveAll(l.Path())
|
||||||
|
return l.Log.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reopen closes and reopens the log.
|
||||||
|
func (l *Log) Reopen() error {
|
||||||
|
if err := l.Log.Close(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := l.Log.Open(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IndexWriter represents a mock implementation of tsm1.IndexWriter.
|
||||||
|
type IndexWriter struct {
|
||||||
|
WriteFn func(valuesByKey map[string]tsm1.Values, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error
|
||||||
|
MarkDeletesFn func(keys []string)
|
||||||
|
MarkMeasurementDeleteFn func(name string)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *IndexWriter) Write(valuesByKey map[string]tsm1.Values, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error {
|
||||||
|
return w.WriteFn(valuesByKey, measurementFieldsToSave, seriesToCreate)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *IndexWriter) MarkDeletes(keys []string) {
|
||||||
|
w.MarkDeletesFn(keys)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *IndexWriter) MarkMeasurementDelete(name string) {
|
||||||
|
w.MarkMeasurementDeleteFn(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PointsSlice represents a slice of point slices.
|
||||||
|
type PointsSlice []Points
|
||||||
|
|
||||||
|
// GeneratePointsSlice randomly generates a slice of slice of points.
|
||||||
|
func GeneratePointsSlice(rand *rand.Rand) PointsSlice {
|
||||||
|
var pointsSlice PointsSlice
|
||||||
|
for i, pointsN := 0, rand.Intn(100); i < pointsN; i++ {
|
||||||
|
var points Points
|
||||||
|
for j, pointN := 0, rand.Intn(1000); j < pointN; j++ {
|
||||||
|
points = append(points, Point{
|
||||||
|
Name: strconv.Itoa(rand.Intn(10)),
|
||||||
|
Fields: models.Fields{"value": rand.Int63n(100000)},
|
||||||
|
Time: time.Unix(0, rand.Int63n(int64(24*time.Hour))).UTC(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
pointsSlice = append(pointsSlice, points)
|
||||||
|
}
|
||||||
|
return pointsSlice
|
||||||
|
}
|
||||||
|
|
||||||
|
// MeasurementFields returns a set of fields used across all points.
|
||||||
|
func (a PointsSlice) MeasurementFields() map[string]*tsdb.MeasurementFields {
|
||||||
|
mfs := map[string]*tsdb.MeasurementFields{}
|
||||||
|
for _, points := range a {
|
||||||
|
for _, p := range points {
|
||||||
|
pp := p.Encode()
|
||||||
|
|
||||||
|
// Create measurement field, if not exists.
|
||||||
|
mf := mfs[string(pp.Key())]
|
||||||
|
if mf == nil {
|
||||||
|
mf = &tsdb.MeasurementFields{Fields: make(map[string]*tsdb.Field)}
|
||||||
|
mfs[string(pp.Key())] = mf
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add all fields on the point.
|
||||||
|
for name, value := range p.Fields {
|
||||||
|
mf.CreateFieldIfNotExists(name, influxql.InspectDataType(value), false)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return mfs
|
||||||
|
}
|
||||||
|
|
||||||
|
// SeriesCreate returns a list of series to create across all points.
|
||||||
|
func (a PointsSlice) SeriesCreate() []*tsdb.SeriesCreate {
|
||||||
|
// Create unique set of series.
|
||||||
|
m := map[string]*tsdb.SeriesCreate{}
|
||||||
|
for _, points := range a {
|
||||||
|
for _, p := range points {
|
||||||
|
if pp := p.Encode(); m[string(pp.Key())] == nil {
|
||||||
|
m[string(pp.Key())] = &tsdb.SeriesCreate{Measurement: pp.Name(), Series: tsdb.NewSeries(string(string(pp.Key())), pp.Tags())}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert to slice.
|
||||||
|
slice := make([]*tsdb.SeriesCreate, 0, len(m))
|
||||||
|
for _, v := range m {
|
||||||
|
slice = append(slice, v)
|
||||||
|
}
|
||||||
|
return slice
|
||||||
|
}
|
||||||
|
|
||||||
|
// Series returns a set of per-series data.
|
||||||
|
func (a PointsSlice) Series() map[string]*Series {
|
||||||
|
m := map[string]*Series{}
|
||||||
|
for _, points := range a {
|
||||||
|
for _, p := range points {
|
||||||
|
pp := p.Encode()
|
||||||
|
|
||||||
|
// Create series if not exists.
|
||||||
|
s := m[string(pp.Key())]
|
||||||
|
if s == nil {
|
||||||
|
s = &Series{
|
||||||
|
Name: string(pp.Key()),
|
||||||
|
Fields: make(map[string]struct{}),
|
||||||
|
}
|
||||||
|
m[string(pp.Key())] = s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Append point data.
|
||||||
|
s.Values = append(s.Values, tsm1.NewValue(p.Time, p.Fields["value"]))
|
||||||
|
|
||||||
|
// Add fields.
|
||||||
|
for k := range p.Fields {
|
||||||
|
s.Fields[k] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deduplicate & sort items in each series.
|
||||||
|
for _, s := range m {
|
||||||
|
s.Values = DedupeValues(s.Values)
|
||||||
|
sort.Sort(s.Values)
|
||||||
|
}
|
||||||
|
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
// Points represents a slice of points.
|
||||||
|
type Points []Point
|
||||||
|
|
||||||
|
func (a Points) Encode() []models.Point {
|
||||||
|
other := make([]models.Point, len(a))
|
||||||
|
for i := range a {
|
||||||
|
other[i] = a[i].Encode()
|
||||||
|
}
|
||||||
|
return other
|
||||||
|
}
|
||||||
|
|
||||||
|
// Point represents a test point
|
||||||
|
type Point struct {
|
||||||
|
Name string
|
||||||
|
Tags models.Tags
|
||||||
|
Fields models.Fields
|
||||||
|
Time time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Point) Encode() models.Point { return models.NewPoint(p.Name, p.Tags, p.Fields, p.Time) }
|
||||||
|
|
||||||
|
// Series represents a single series of test data along with its observed fields.
type Series struct {
	Name   string
	Fields map[string]struct{}
	Values tsm1.Values
}

// FieldsSlice returns a list of field names.
func (s *Series) FieldsSlice() []string {
	a := make([]string, 0, len(s.Fields))
	for k := range s.Fields {
		a = append(a, k)
	}
	return a
}

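Taken together, these helpers let a test describe its input data once and derive every view the engine needs. A minimal usage sketch, assuming the surrounding test file's PointsSlice type and this vendored revision of the models package (the measurement, tags, and values are illustrative only):

now := time.Now().UTC()
ps := PointsSlice{
	Points{{Name: "cpu", Tags: models.Tags{"host": "a"}, Fields: models.Fields{"value": 1.0}, Time: now}},
	Points{{Name: "cpu", Tags: models.Tags{"host": "a"}, Fields: models.Fields{"value": 2.0}, Time: now.Add(time.Second)}},
}
series := ps.Series()         // one entry per unique series key, values deduped and sorted
mfs := ps.MeasurementFields() // inferred field types per measurement
creates := ps.SeriesCreate()  // unique series that would need to be created
_, _, _ = series, mfs, creates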
Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine/tsm1/write_lock.go (generated, vendored): 2 changes
@@ -57,7 +57,7 @@ func (w *WriteLock) UnlockRange(min, max int64) {
 	defer w.rangesLock.Unlock()

 	// take the range out of the slice and unlock it
-	a := make([]*rangeLock, 0)
+	var a []*rangeLock
 	for _, r := range w.ranges {
 		if r.min == min && r.max == max {
 			r.mu.Unlock()
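The change from make to a plain var declaration works because append treats a nil slice like an empty one, so the up-front allocation buys nothing. A tiny self-contained illustration:

package main

import "fmt"

func main() {
	var a []int              // nil slice: length 0, no backing array yet
	a = append(a, 1, 2)      // append allocates on first use
	fmt.Println(a, a == nil) // prints: [1 2] false
}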
Godeps/_workspace/src/github.com/influxdb/influxdb/tsdb/engine/tsm1/write_lock_test.go (generated, vendored): 37 changes
@@ -1,8 +1,9 @@
 package tsm1_test

 import (
-	// "sync"
+	"sync"
 	"testing"
+	"testing/quick"
 	"time"

 	"github.com/influxdb/influxdb/tsdb/engine/tsm1"
@@ -129,3 +130,37 @@ func TestWriteLock_Same(t *testing.T) {
 // 		// we're all good
 // 	}
 // }
+
+func TestWriteLock_Quick(t *testing.T) {
+	if testing.Short() {
+		t.Skip("short mode")
+	}
+
+	quick.Check(func(extents []struct{ Min, Max uint64 }) bool {
+		var wg sync.WaitGroup
+		var mu tsm1.WriteLock
+		for _, extent := range extents {
+			// Limit the range.
+			extent.Min %= 10
+			extent.Max %= 10
+
+			// Reverse if out of order.
+			if extent.Min > extent.Max {
+				extent.Min, extent.Max = extent.Max, extent.Min
+			}
+
+			// Lock, wait, and unlock in a separate goroutine.
+			wg.Add(1)
+			go func(min, max int64) {
+				defer wg.Done()
+				mu.LockRange(min, max)
+				time.Sleep(1 * time.Millisecond)
+				mu.UnlockRange(min, max)
+			}(int64(extent.Min), int64(extent.Max))
+		}
+
+		// All locks should return.
+		wg.Wait()
+		return true
+	}, nil)
+}
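The new test uses the standard testing/quick package, which generates pseudo-random values for the closure's arguments and reports the first input for which the property returns false. Note that the return value of quick.Check is discarded above, so a property failure would only surface indirectly. A minimal sketch of the checked-property pattern:

package main

import (
	"fmt"
	"testing/quick"
)

func main() {
	// Property: swapping an out-of-order pair always yields min <= max.
	err := quick.Check(func(min, max uint64) bool {
		if min > max {
			min, max = max, min
		}
		return min <= max
	}, nil)
	fmt.Println(err) // <nil> when the property holds for every generated input
}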
@@ -625,53 +625,58 @@ func (e *SelectExecutor) processFunctions(results [][]interface{}, columnNames [
 }

 func (e *SelectExecutor) processSelectors(results [][]interface{}, callPosition int, hasTimeField bool, columnNames []string) ([][]interface{}, error) {
-	for i, vals := range results {
-		for j := 1; j < len(vals); j++ {
-			switch v := vals[j].(type) {
+	for i, columns := range results {
+		// if the row doesn't have enough columns, expand it
+		if len(columns) != len(columnNames) {
+			columns = append(columns, make([]interface{}, len(columnNames)-len(columns))...)
+		}
+		for j := 1; j < len(columns); j++ {
+			switch v := columns[j].(type) {
 			case PositionPoint:
-				tMin := vals[0].(time.Time)
-				results[i] = e.selectorPointToQueryResult(vals, hasTimeField, callPosition, v, tMin, columnNames)
+				tMin := columns[0].(time.Time)
+				results[i] = e.selectorPointToQueryResult(columns, hasTimeField, callPosition, v, tMin, columnNames)
 			}
 		}
 	}
 	return results, nil
 }

-func (e *SelectExecutor) selectorPointToQueryResult(row []interface{}, hasTimeField bool, columnIndex int, p PositionPoint, tMin time.Time, columnNames []string) []interface{} {
-	// if the row doesn't have enough columns, expand it
-	if len(row) != len(columnNames) {
-		row = append(row, make([]interface{}, len(columnNames)-len(row))...)
-	}
+func (e *SelectExecutor) selectorPointToQueryResult(columns []interface{}, hasTimeField bool, columnIndex int, p PositionPoint, tMin time.Time, columnNames []string) []interface{} {
 	callCount := len(e.stmt.FunctionCalls())
 	if callCount == 1 {
-		tm := time.Unix(0, p.Time).UTC().Format(time.RFC3339Nano)
+		tm := time.Unix(0, p.Time).UTC()
 		// If we didn't explicitly ask for time, and we have a group by, then use TMIN for the time returned
 		if len(e.stmt.Dimensions) > 0 && !hasTimeField {
-			tm = tMin.UTC().Format(time.RFC3339Nano)
+			tm = tMin.UTC()
 		}
-		row[0] = tm
+		columns[0] = tm
 	}
 	for i, c := range columnNames {
 		// skip over time; we already handled that above
 		if i == 0 {
 			continue
 		}
 		if (i == columnIndex && hasTimeField) || (i == columnIndex+1 && !hasTimeField) {
-			row[i] = p.Value
+			// Check to see if we previously processed this column; if so, continue.
+			if _, ok := columns[i].(PositionPoint); !ok && columns[i] != nil {
+				continue
+			}
+			columns[i] = p.Value
 			continue
 		}

 		if callCount == 1 {
 			// Always favor fields over tags if there is a name collision
 			if t, ok := p.Fields[c]; ok {
-				row[i] = t
+				columns[i] = t
 			} else if t, ok := p.Tags[c]; ok {
 				// look in the tags for a value
-				row[i] = t
+				columns[i] = t
 			}
 		}
 	}
-	return row
+	return columns
 }

 func (e *SelectExecutor) processAggregates(results [][]interface{}, columnNames []string, call *influxql.Call) ([][]interface{}, error) {
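Replacing Format(time.RFC3339Nano) with bare UTC() means result rows now carry time.Time values rather than pre-formatted strings, presumably leaving serialization to the response-encoding layer. The two representations side by side (the timestamp is illustrative):

t := time.Unix(0, 1444087717000000000).UTC()
var cell interface{} = t                // what the row stores after this change
formatted := t.Format(time.RFC3339Nano) // the string the row used to store
_, _ = cell, formatted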
@@ -699,10 +704,10 @@ func (e *SelectExecutor) processAggregates(results [][]interface{}, columnNames
 }

 func (e *SelectExecutor) aggregatePointToQueryResult(p PositionPoint, tMin time.Time, call *influxql.Call, columnNames []string) []interface{} {
-	tm := time.Unix(0, p.Time).UTC().Format(time.RFC3339Nano)
+	tm := time.Unix(0, p.Time).UTC()
 	// If we didn't explicitly ask for time, and we have a group by, then use TMIN for the time returned
 	if len(e.stmt.Dimensions) > 0 && !e.stmt.HasTimeFieldSpecified() {
-		tm = tMin.UTC().Format(time.RFC3339Nano)
+		tm = tMin.UTC()
 	}
 	vals := []interface{}{tm}
 	for _, c := range columnNames {
@@ -154,11 +154,18 @@ func initializeReduceFunc(c *influxql.Call) (reduceFunc, error) {
 		return ReduceLast, nil
 	case "top", "bottom":
 		return func(values []interface{}) interface{} {
-			return ReduceTopBottom(values, c)
+			lit, _ := c.Args[len(c.Args)-1].(*influxql.NumberLiteral)
+			limit := int(lit.Val)
+			fields := topCallArgs(c)
+			return ReduceTopBottom(values, limit, fields, c.Name)
 		}, nil
 	case "percentile":
 		return func(values []interface{}) interface{} {
-			return ReducePercentile(values, c)
+			// Checks that this arg exists and is a valid type are done in the parsing
+			// validation and have test coverage there.
+			lit, _ := c.Args[1].(*influxql.NumberLiteral)
+			percentile := lit.Val
+			return ReducePercentile(values, percentile)
 		}, nil
 	case "derivative", "non_negative_derivative":
 		// If the arg is another aggregate, e.g. derivative(mean(value)), then
@@ -1574,12 +1581,10 @@ func MapTopBottom(input *MapInput, limit int, fields []string, argCount int, cal

 // ReduceTopBottom computes the top or bottom values for each key.
 // This function assumes that its inputs are in sorted ascending order.
-func ReduceTopBottom(values []interface{}, c *influxql.Call) interface{} {
-	lit, _ := c.Args[len(c.Args)-1].(*influxql.NumberLiteral)
-	limit := int(lit.Val)
-
-	out := positionOut{callArgs: topCallArgs(c)}
-	minheap := topBottomMapOut{&out, c.Name == "bottom"}
+func ReduceTopBottom(values []interface{}, limit int, fields []string, callName string) interface{} {
+	out := positionOut{callArgs: fields}
+	minheap := topBottomMapOut{&out, callName == "bottom"}
 	results := make([]PositionPoints, 0, len(values))
 	out.points = make([]PositionPoint, 0, limit)
 	for _, v := range values {
@@ -1606,7 +1611,7 @@ func ReduceTopBottom(values []interface{}, c *influxql.Call) interface{} {
 	if whichselected == -1 {
 		// none of the points have any values
 		// so we can return what we have now
-		sort.Sort(topBottomReduceOut{out, c.Name == "bottom"})
+		sort.Sort(topBottomReduceOut{out, callName == "bottom"})
 		return out.points
 	}
 	v := results[whichselected]
@@ -1615,7 +1620,7 @@ func ReduceTopBottom(values []interface{}, c *influxql.Call) interface{} {
 	}

 	// now we need to resort the tops by time
-	sort.Sort(topBottomReduceOut{out, c.Name == "bottom"})
+	sort.Sort(topBottomReduceOut{out, callName == "bottom"})
 	return out.points
 }
@@ -1629,11 +1634,7 @@ func MapEcho(input *MapInput) interface{} {
 }

 // ReducePercentile computes the percentile of values for each key.
-func ReducePercentile(values []interface{}, c *influxql.Call) interface{} {
-	// Checks that this arg exists and is a valid type are done in the parsing validation
-	// and have test coverage there
-	lit, _ := c.Args[1].(*influxql.NumberLiteral)
-	percentile := lit.Val
-
+func ReducePercentile(values []interface{}, percentile float64) interface{} {
 	var allValues []float64
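With argument unpacking hoisted into initializeReduceFunc, the reduce functions now take plain values instead of the AST call node. Calling the new signatures directly looks like this (the values slice comes from the map phase, so its element shape is engine-internal and left empty here):

var values []interface{} // per-mapper intermediate outputs
_ = ReducePercentile(values, 90)                         // 90th percentile, passed directly
_ = ReduceTopBottom(values, 3, []string{"value"}, "top") // top 3 by the "value" field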
@@ -101,7 +101,7 @@ func TestReducePercentileNil(t *testing.T) {
 	}

 	// ReducePercentile should ignore nil values when calculating the percentile
-	got := ReducePercentile(input, &influxql.Call{Name: "top", Args: []influxql.Expr{&influxql.VarRef{Val: "field1"}, &influxql.NumberLiteral{Val: 100}}})
+	got := ReducePercentile(input, 100)
 	if got != nil {
 		t.Fatalf("ReducePercentile(100) returned wrong type. exp nil got %v", got)
 	}
@@ -847,7 +847,10 @@ func TestReduceTopBottom(t *testing.T) {
 		if test.skip {
 			continue
 		}
-		values := ReduceTopBottom(test.values, test.call)
+		lit, _ := test.call.Args[len(test.call.Args)-1].(*influxql.NumberLiteral)
+		limit := int(lit.Val)
+		fields := topCallArgs(test.call)
+		values := ReduceTopBottom(test.values, limit, fields, test.call.Name)
 		t.Logf("Test: %s", test.name)
 		if values != nil {
 			v, _ := values.(PositionPoints)
@@ -0,0 +1,53 @@
+package tsdb
+
+import (
+	"errors"
+	"time"
+
+	"github.com/influxdb/influxdb/influxql"
+	"github.com/influxdb/influxdb/models"
+)
+
+// convertRowToPoints will convert a query result Row into Points that can be written back in.
+// Used for INTO queries.
+func convertRowToPoints(measurementName string, row *models.Row) ([]models.Point, error) {
+	// figure out which parts of the result are the time and which are the fields
+	timeIndex := -1
+	fieldIndexes := make(map[string]int)
+	for i, c := range row.Columns {
+		if c == "time" {
+			timeIndex = i
+		} else {
+			fieldIndexes[c] = i
+		}
+	}
+
+	if timeIndex == -1 {
+		return nil, errors.New("error finding time index in result")
+	}
+
+	points := make([]models.Point, 0, len(row.Values))
+	for _, v := range row.Values {
+		vals := make(map[string]interface{})
+		for fieldName, fieldIndex := range fieldIndexes {
+			vals[fieldName] = v[fieldIndex]
+		}
+
+		p := models.NewPoint(measurementName, row.Tags, vals, v[timeIndex].(time.Time))
+		points = append(points, p)
+	}
+
+	return points, nil
+}
+
+// intoDB returns the explicit database named by the INTO target, or an error if none was given.
+func intoDB(stmt *influxql.SelectStatement) (string, error) {
+	if stmt.Target.Measurement.Database != "" {
+		return stmt.Target.Measurement.Database, nil
+	}
+	return "", errNoDatabaseInTarget
+}
+
+var errNoDatabaseInTarget = errors.New("no database in target")
+
+func intoRP(stmt *influxql.SelectStatement) string          { return stmt.Target.Measurement.RetentionPolicy }
+func intoMeasurement(stmt *influxql.SelectStatement) string { return stmt.Target.Measurement.Name }
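A sketch of how convertRowToPoints round-trips a result row into writable points; the row contents and target measurement name are illustrative:

row := &models.Row{
	Name:    "cpu",
	Columns: []string{"time", "mean"},
	Values:  [][]interface{}{{time.Unix(0, 0).UTC(), 42.0}},
}
points, err := convertRowToPoints("cpu_1h", row)
if err != nil {
	// returns "error finding time index in result" when no "time" column exists
}
_ = points // one point in measurement "cpu_1h" with field mean=42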
@@ -136,7 +136,7 @@ func (db *DatabaseIndex) measurementsByExpr(expr influxql.Expr) (Measurements, e
 	case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX:
 		tag, ok := e.LHS.(*influxql.VarRef)
 		if !ok {
-			return nil, fmt.Errorf("left side of '%s' must be a tag name", e.Op.String())
+			return nil, fmt.Errorf("left side of '%s' must be a tag key", e.Op.String())
 		}

 		tf := &TagFilter{
@@ -603,7 +603,7 @@ func (m *Measurement) DropSeries(seriesID uint64) {
 // filters walks the where clause of a select statement and returns a map with all series ids
 // matching the where clause and any filter expression that should be applied to each
 func (m *Measurement) filters(stmt *influxql.SelectStatement) (map[uint64]influxql.Expr, error) {
-	if stmt.Condition == nil || stmt.OnlyTimeDimensions() {
+	if stmt.Condition == nil || influxql.OnlyTimeExpr(stmt.Condition) {
 		seriesIdsToExpr := make(map[uint64]influxql.Expr)
 		for _, id := range m.seriesIDs {
 			seriesIdsToExpr[id] = nil
@@ -699,7 +699,7 @@ func (m *Measurement) TagSets(stmt *influxql.SelectStatement, dimensions []strin
 }

 // mergeSeriesFilters merges two sets of filter expressions and culls series IDs.
-func mergeSeriesFilters(op influxql.Token, ids SeriesIDs, lfilters, rfilters map[uint64]influxql.Expr) (SeriesIDs, map[uint64]influxql.Expr) {
+func mergeSeriesFilters(op influxql.Token, ids SeriesIDs, lfilters, rfilters FilterExprs) (SeriesIDs, FilterExprs) {
 	// Create a map to hold the final set of series filter expressions.
 	filters := make(map[uint64]influxql.Expr, 0)
 	// Resulting list of series IDs
@@ -833,10 +833,30 @@ func (m *Measurement) idsForExpr(n *influxql.BinaryExpr) (SeriesIDs, influxql.Ex
 	return nil, nil, nil
 }

+// FilterExprs represents a map of series IDs to filter expressions.
+type FilterExprs map[uint64]influxql.Expr
+
+// DeleteBoolLiteralTrues deletes all elements whose filter expression is a boolean literal true.
+func (fe FilterExprs) DeleteBoolLiteralTrues() {
+	for id, expr := range fe {
+		if e, ok := expr.(*influxql.BooleanLiteral); ok && e.Val == true {
+			delete(fe, id)
+		}
+	}
+}
+
+// Len returns the number of elements.
+func (fe FilterExprs) Len() int {
+	if fe == nil {
+		return 0
+	}
+	return len(fe)
+}
+
 // walkWhereForSeriesIds recursively walks the WHERE clause and returns an ordered set of series IDs and
 // a map from those series IDs to filter expressions that should be used to limit points returned in
 // the final query result.
-func (m *Measurement) walkWhereForSeriesIds(expr influxql.Expr) (SeriesIDs, map[uint64]influxql.Expr, error) {
+func (m *Measurement) walkWhereForSeriesIds(expr influxql.Expr) (SeriesIDs, FilterExprs, error) {
 	switch n := expr.(type) {
 	case *influxql.BinaryExpr:
 		switch n.Op {
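A usage sketch for the new FilterExprs helpers; the series IDs and expressions are illustrative:

fe := FilterExprs{
	1: &influxql.BooleanLiteral{Val: true}, // a pure tag match; safe to drop
	2: nil,                                 // no residual filter for this series
}
fe.DeleteBoolLiteralTrues()
_ = fe.Len() // 1: only the nil-filter entry remains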
@@ -847,7 +867,7 @@ func (m *Measurement) walkWhereForSeriesIds(expr influxql.Expr) (SeriesIDs, map[
 			return nil, nil, err
 		}

-		filters := map[uint64]influxql.Expr{}
+		filters := FilterExprs{}
 		for _, id := range ids {
 			filters[id] = expr
 		}
@@ -46,6 +46,10 @@ type QueryExecutor struct {
 		CreateMapper(shard meta.ShardInfo, stmt influxql.Statement, chunkSize int) (Mapper, error)
 	}

+	IntoWriter interface {
+		WritePointsInto(p *IntoWriteRequest) error
+	}
+
 	Logger          *log.Logger
 	QueryLogEnabled bool
@@ -53,6 +57,13 @@ type QueryExecutor struct {
 	Store *Store
 }

+// IntoWriteRequest is a partial copy of cluster.WriteRequest.
+type IntoWriteRequest struct {
+	Database        string
+	RetentionPolicy string
+	Points          []models.Point
+}
+
 // NewQueryExecutor returns an initialized QueryExecutor
 func NewQueryExecutor(store *Store) *QueryExecutor {
 	return &QueryExecutor{
@@ -275,34 +286,6 @@ func (q *QueryExecutor) PlanSelect(stmt *influxql.SelectStatement, chunkSize int
 	return executor, nil
 }

-// executeSelectStatement plans and executes a select statement against a database.
-func (q *QueryExecutor) executeSelectStatement(statementID int, stmt *influxql.SelectStatement, results chan *influxql.Result, chunkSize int) error {
-	// Plan statement execution.
-	e, err := q.PlanSelect(stmt, chunkSize)
-	if err != nil {
-		return err
-	}
-
-	// Execute plan.
-	ch := e.Execute()
-
-	// Stream results from the channel. We should send an empty result if nothing comes through.
-	resultSent := false
-	for row := range ch {
-		if row.Err != nil {
-			return row.Err
-		}
-		resultSent = true
-		results <- &influxql.Result{StatementID: statementID, Series: []*models.Row{row}}
-	}
-
-	if !resultSent {
-		results <- &influxql.Result{StatementID: statementID, Series: make([]*models.Row, 0)}
-	}
-
-	return nil
-}
-
 // expandSources expands regex sources and removes duplicates.
 // NOTE: sources must be normalized (db and rp set) before calling this function.
 func (q *QueryExecutor) expandSources(sources influxql.Sources) (influxql.Sources, error) {
@@ -416,6 +399,11 @@ func (q *QueryExecutor) executeDropMeasurementStatement(stmt *influxql.DropMeasu

 // executeDropSeriesStatement removes all series from the local store that match the drop query
 func (q *QueryExecutor) executeDropSeriesStatement(stmt *influxql.DropSeriesStatement, database string) *influxql.Result {
+	// Check for time in WHERE clause (not supported).
+	if influxql.HasTimeExpr(stmt.Condition) {
+		return &influxql.Result{Err: errors.New("DROP SERIES doesn't support time in WHERE clause")}
+	}
+
 	// Find the database.
 	db := q.Store.DatabaseIndex(database)
 	if db == nil {
@@ -438,12 +426,23 @@ func (q *QueryExecutor) executeDropSeriesStatement(stmt *influxql.DropSeriesStat
 	var seriesKeys []string
 	for _, m := range measurements {
 		var ids SeriesIDs
+		var filters FilterExprs
 		if stmt.Condition != nil {
 			// Get series IDs that match the WHERE clause.
-			ids, _, err = m.walkWhereForSeriesIds(stmt.Condition)
+			ids, filters, err = m.walkWhereForSeriesIds(stmt.Condition)
 			if err != nil {
 				return &influxql.Result{Err: err}
 			}
+
+			// Delete boolean literal true filter expressions.
+			// These are returned for `WHERE tagKey = 'tagVal'` type expressions and are okay.
+			filters.DeleteBoolLiteralTrues()
+
+			// Check for unsupported field filters.
+			// Any remaining filters means there were fields (e.g., `WHERE value = 1.2`).
+			if filters.Len() > 0 {
+				return &influxql.Result{Err: errors.New("DROP SERIES doesn't support fields in WHERE clause")}
+			}
 		} else {
 			// No WHERE clause so get all series IDs for this measurement.
 			ids = m.seriesIDs
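At the query layer, the effect is that DROP SERIES now cleanly rejects conditions it cannot honor. Illustrative InfluxQL only; the error texts match the checks above:

DROP SERIES FROM cpu WHERE time > now() - 1h   -- rejected: time in WHERE clause not supported
DROP SERIES FROM cpu WHERE value = 1.2         -- rejected: field filters not supported
DROP SERIES FROM cpu WHERE host = 'server01'   -- accepted: tag filters only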
@@ -465,6 +464,11 @@ func (q *QueryExecutor) executeDropSeriesStatement(stmt *influxql.DropSeriesStat
 }

 func (q *QueryExecutor) executeShowSeriesStatement(stmt *influxql.ShowSeriesStatement, database string) *influxql.Result {
+	// Check for time in WHERE clause (not supported).
+	if influxql.HasTimeExpr(stmt.Condition) {
+		return &influxql.Result{Err: errors.New("SHOW SERIES doesn't support time in WHERE clause")}
+	}
+
 	// Find the database.
 	db := q.Store.DatabaseIndex(database)
 	if db == nil {
@@ -491,20 +495,27 @@ func (q *QueryExecutor) executeShowSeriesStatement(stmt *influxql.ShowSeriesStat
 	// Loop through measurements to build result. One result row / measurement.
 	for _, m := range measurements {
 		var ids SeriesIDs
+		var filters FilterExprs
+
 		if stmt.Condition != nil {
 			// Get series IDs that match the WHERE clause.
-			ids, _, err = m.walkWhereForSeriesIds(stmt.Condition)
+			ids, filters, err = m.walkWhereForSeriesIds(stmt.Condition)
 			if err != nil {
 				return &influxql.Result{Err: err}
 			}
+
+			// Delete boolean literal true filter expressions.
+			filters.DeleteBoolLiteralTrues()
+
+			// Check for unsupported field filters.
+			if filters.Len() > 0 {
+				return &influxql.Result{Err: errors.New("SHOW SERIES doesn't support fields in WHERE clause")}
+			}
+
 			// If no series matched, then go to the next measurement.
 			if len(ids) == 0 {
 				continue
 			}
-
-			// TODO: check return of walkWhereForSeriesIds for fields
 		} else {
 			// No WHERE clause so get all series IDs for this measurement.
 			ids = m.seriesIDs
@@ -590,6 +601,11 @@ func (q *QueryExecutor) planStatement(stmt influxql.Statement, database string,

 // PlanShowMeasurements creates an execution plan for a SHOW MEASUREMENTS statement and returns an Executor.
 func (q *QueryExecutor) PlanShowMeasurements(stmt *influxql.ShowMeasurementsStatement, database string, chunkSize int) (Executor, error) {
+	// Check for time in WHERE clause (not supported).
+	if influxql.HasTimeExpr(stmt.Condition) {
+		return nil, errors.New("SHOW MEASUREMENTS doesn't support time in WHERE clause")
+	}
+
 	// Get the database info.
 	di, err := q.MetaStore.Database(database)
 	if err != nil {
|
||||||
|
|
||||||
// PlanShowTagKeys creates an execution plan for a SHOW MEASUREMENTS statement and returns an Executor.
|
// PlanShowTagKeys creates an execution plan for a SHOW MEASUREMENTS statement and returns an Executor.
|
||||||
func (q *QueryExecutor) PlanShowTagKeys(stmt *influxql.ShowTagKeysStatement, database string, chunkSize int) (Executor, error) {
|
func (q *QueryExecutor) PlanShowTagKeys(stmt *influxql.ShowTagKeysStatement, database string, chunkSize int) (Executor, error) {
|
||||||
|
// Check for time in WHERE clause (not supported).
|
||||||
|
if influxql.HasTimeExpr(stmt.Condition) {
|
||||||
|
return nil, errors.New("SHOW TAG KEYS doesn't support time in WHERE clause")
|
||||||
|
}
|
||||||
|
|
||||||
// Get the database info.
|
// Get the database info.
|
||||||
di, err := q.MetaStore.Database(database)
|
di, err := q.MetaStore.Database(database)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -659,16 +680,48 @@ func (q *QueryExecutor) executeStatement(statementID int, stmt influxql.Statemen
|
||||||
|
|
||||||
// Execute plan.
|
// Execute plan.
|
||||||
ch := e.Execute()
|
ch := e.Execute()
|
||||||
|
var writeerr error
|
||||||
|
var intoNum int64
|
||||||
|
var isinto bool
|
||||||
// Stream results from the channel. We should send an empty result if nothing comes through.
|
// Stream results from the channel. We should send an empty result if nothing comes through.
|
||||||
resultSent := false
|
resultSent := false
|
||||||
for row := range ch {
|
for row := range ch {
|
||||||
|
// We had a write error. Continue draining results from the channel
|
||||||
|
// so we don't hang the goroutine in the executor.
|
||||||
|
if writeerr != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
if row.Err != nil {
|
if row.Err != nil {
|
||||||
return row.Err
|
return row.Err
|
||||||
}
|
}
|
||||||
|
selectstmt, ok := stmt.(*influxql.SelectStatement)
|
||||||
|
if ok && selectstmt.Target != nil {
|
||||||
|
isinto = true
|
||||||
|
// this is a into query. Write results back to database
|
||||||
|
writeerr = q.writeInto(row, selectstmt)
|
||||||
|
intoNum += int64(len(row.Values))
|
||||||
|
} else {
|
||||||
resultSent = true
|
resultSent = true
|
||||||
results <- &influxql.Result{StatementID: statementID, Series: []*models.Row{row}}
|
results <- &influxql.Result{StatementID: statementID, Series: []*models.Row{row}}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
if writeerr != nil {
|
||||||
|
return writeerr
|
||||||
|
} else if isinto {
|
||||||
|
results <- &influxql.Result{
|
||||||
|
StatementID: statementID,
|
||||||
|
Series: []*models.Row{{
|
||||||
|
Name: "result",
|
||||||
|
// it seems weird to give a time here, but so much stuff breaks if you don't
|
||||||
|
Columns: []string{"time", "written"},
|
||||||
|
Values: [][]interface{}{{
|
||||||
|
time.Unix(0, 0).UTC(),
|
||||||
|
intoNum,
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
if !resultSent {
|
if !resultSent {
|
||||||
results <- &influxql.Result{StatementID: statementID, Series: make([]*models.Row, 0)}
|
results <- &influxql.Result{StatementID: statementID, Series: make([]*models.Row, 0)}
|
||||||
|
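With this wiring, a SELECT carrying an INTO target streams its rows to writeInto instead of the client, then reports a single row named "result" with the count of written values. Illustrative InfluxQL only; note that the target must name a database explicitly, or intoDB fails with "no database in target":

SELECT mean(value) INTO "mydb"."default".cpu_mean FROM cpu GROUP BY time(1h)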
@@ -677,33 +730,50 @@ func (q *QueryExecutor) executeStatement(statementID int, stmt influxql.Statemen
 	return nil
 }

-func (q *QueryExecutor) executeShowMeasurementsStatement(statementID int, stmt *influxql.ShowMeasurementsStatement, database string, results chan *influxql.Result, chunkSize int) error {
-	// Plan statement execution.
-	e, err := q.PlanShowMeasurements(stmt, database, chunkSize)
-	if err != nil {
-		return err
-	}
-
-	// Execute plan.
-	ch := e.Execute()
-
-	// Stream results from the channel. We should send an empty result if nothing comes through.
-	resultSent := false
-	for row := range ch {
-		if row.Err != nil {
-			return row.Err
-		}
-		resultSent = true
-		results <- &influxql.Result{StatementID: statementID, Series: []*models.Row{row}}
-	}
-
-	if !resultSent {
-		results <- &influxql.Result{StatementID: statementID, Series: make([]*models.Row, 0)}
-	}
-	return nil
-}
+func (q *QueryExecutor) writeInto(row *models.Row, selectstmt *influxql.SelectStatement) error {
+	// It might seem odd that this is where rows get converted back to points and
+	// written. The Executors (both aggregate and raw) are complex enough that
+	// changing them to write back to the DB directly would be clumsy, and the
+	// interweaving of limitedRowWriter and ExecuteAggregate/Raw makes it very
+	// hard to guarantee the results would match a normal query.
+	measurement := intoMeasurement(selectstmt)
+	intodb, err := intoDB(selectstmt)
+	if err != nil {
+		return err
+	}
+	rp := intoRP(selectstmt)
+	points, err := convertRowToPoints(measurement, row)
+	if err != nil {
+		return err
+	}
+	for _, p := range points {
+		fields := p.Fields()
+		for _, v := range fields {
+			if v == nil {
+				return nil
+			}
+		}
+	}
+	req := &IntoWriteRequest{
+		Database:        intodb,
+		RetentionPolicy: rp,
+		Points:          points,
+	}
+	return q.IntoWriter.WritePointsInto(req)
+}

 func (q *QueryExecutor) executeShowTagValuesStatement(stmt *influxql.ShowTagValuesStatement, database string) *influxql.Result {
+	// Check for time in WHERE clause (not supported).
+	if influxql.HasTimeExpr(stmt.Condition) {
+		return &influxql.Result{Err: errors.New("SHOW TAG VALUES doesn't support time in WHERE clause")}
+	}
+
 	// Find the database.
 	db := q.Store.DatabaseIndex(database)
 	if db == nil {
@@ -168,6 +168,8 @@ func (s *Store) DeleteDatabase(name string, shardIDs []uint64) error {

 // ShardIDs returns a slice of all ShardIDs under management.
 func (s *Store) ShardIDs() []uint64 {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
 	ids := make([]uint64, 0, len(s.shards))
 	for i, _ := range s.shards {
 		ids = append(ids, i)
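The added RLock/RUnlock pair matters because ranging over a Go map while another goroutine mutates it is a data race and can panic at runtime. A minimal self-contained sketch of the same guarded-read pattern, with field names assumed for illustration:

package main

import "sync"

type store struct {
	mu     sync.RWMutex
	shards map[uint64]struct{}
}

func (s *store) ids() []uint64 {
	s.mu.RLock()         // readers may hold this concurrently with each other
	defer s.mu.RUnlock() // but writers are excluded until the copy is done
	ids := make([]uint64, 0, len(s.shards))
	for id := range s.shards {
		ids = append(ids, id)
	}
	return ids
}

func main() { _ = (&store{shards: map[uint64]struct{}{1: {}}}).ids() }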