update godep to include paho.mqtt.
This commit is contained in:
parent
7e96a9afda
commit
0cd7d6ae55
|
@ -5,6 +5,11 @@
|
||||||
"./..."
|
"./..."
|
||||||
],
|
],
|
||||||
"Deps": [
|
"Deps": [
|
||||||
|
{
|
||||||
|
"ImportPath": "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git",
|
||||||
|
"Comment": "v0.9.1-14-g546c47a",
|
||||||
|
"Rev": "546c47a6d0e9492e77f6f37473d59c36a708e08b"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/Shopify/sarama",
|
"ImportPath": "github.com/Shopify/sarama",
|
||||||
"Comment": "v1.4.3-45-g5b18996",
|
"Comment": "v1.4.3-45-g5b18996",
|
||||||
|
@ -96,10 +101,40 @@
|
||||||
"Rev": "d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee"
|
"Rev": "d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/influxdb/influxdb",
|
"ImportPath": "github.com/influxdb/influxdb/client",
|
||||||
"Comment": "v0.9.3",
|
"Comment": "v0.9.3",
|
||||||
"Rev": "5d42b212fca8facfe9db0c83822f09b88be643ec"
|
"Rev": "5d42b212fca8facfe9db0c83822f09b88be643ec"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/influxdb/influxdb/influxql",
|
||||||
|
"Comment": "v0.9.3",
|
||||||
|
"Rev": "5d42b212fca8facfe9db0c83822f09b88be643ec"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/influxdb/influxdb/meta",
|
||||||
|
"Comment": "v0.9.3",
|
||||||
|
"Rev": "5d42b212fca8facfe9db0c83822f09b88be643ec"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/influxdb/influxdb/snapshot",
|
||||||
|
"Comment": "v0.9.3",
|
||||||
|
"Rev": "5d42b212fca8facfe9db0c83822f09b88be643ec"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/influxdb/influxdb/toml",
|
||||||
|
"Comment": "v0.9.3",
|
||||||
|
"Rev": "5d42b212fca8facfe9db0c83822f09b88be643ec"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/influxdb/influxdb/tsdb",
|
||||||
|
"Comment": "v0.9.3",
|
||||||
|
"Rev": "5d42b212fca8facfe9db0c83822f09b88be643ec"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/influxdb/telegraf",
|
||||||
|
"Comment": "v0.1.9-22-g64a3a71",
|
||||||
|
"Rev": "64a3a718e6cac559b8d2ab459ab1780fca9c9e22"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/lib/pq",
|
"ImportPath": "github.com/lib/pq",
|
||||||
"Comment": "go1.0-cutoff-59-gb269bd0",
|
"Comment": "go1.0-cutoff-59-gb269bd0",
|
||||||
|
@ -117,6 +152,10 @@
|
||||||
"ImportPath": "github.com/naoina/toml",
|
"ImportPath": "github.com/naoina/toml",
|
||||||
"Rev": "5811abcabb29d6af0fdf060f96d328962bd3cd5e"
|
"Rev": "5811abcabb29d6af0fdf060f96d328962bd3cd5e"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/pborman/uuid",
|
||||||
|
"Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/prometheus/client_golang/extraction",
|
"ImportPath": "github.com/prometheus/client_golang/extraction",
|
||||||
"Comment": "0.7.0-22-gbbd006b",
|
"Comment": "0.7.0-22-gbbd006b",
|
||||||
|
@ -215,6 +254,10 @@
|
||||||
"ImportPath": "golang.org/x/crypto/blowfish",
|
"ImportPath": "golang.org/x/crypto/blowfish",
|
||||||
"Rev": "173ce04bfaf66c7bb0fa9d5c0bfd93e773909dbd"
|
"Rev": "173ce04bfaf66c7bb0fa9d5c0bfd93e773909dbd"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "golang.org/x/net/websocket",
|
||||||
|
"Rev": "db8e4de5b2d6653f66aea53094624468caad15d2"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "gopkg.in/dancannon/gorethink.v1",
|
"ImportPath": "gopkg.in/dancannon/gorethink.v1",
|
||||||
"Comment": "v1.x.x",
|
"Comment": "v1.x.x",
|
||||||
|
@ -224,11 +267,6 @@
|
||||||
"ImportPath": "gopkg.in/mgo.v2",
|
"ImportPath": "gopkg.in/mgo.v2",
|
||||||
"Comment": "r2015.06.03-3-g3569c88",
|
"Comment": "r2015.06.03-3-g3569c88",
|
||||||
"Rev": "3569c88678d88179dcbd68d02ab081cbca3cd4d0"
|
"Rev": "3569c88678d88179dcbd68d02ab081cbca3cd4d0"
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git",
|
|
||||||
"Comment": "v0.9.1-14-g546c47a",
|
|
||||||
"Rev": "546c47a6d0e9492e77f6f37473d59c36a708e08b"
|
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,67 +0,0 @@
|
||||||
*~
|
|
||||||
src/
|
|
||||||
|
|
||||||
config.json
|
|
||||||
/bin/
|
|
||||||
|
|
||||||
/pkg/
|
|
||||||
|
|
||||||
TAGS
|
|
||||||
|
|
||||||
# vim temp files
|
|
||||||
*.swp
|
|
||||||
|
|
||||||
*.test
|
|
||||||
/query/a.out*
|
|
||||||
.DS_Store
|
|
||||||
|
|
||||||
# ignore generated files.
|
|
||||||
cmd/influxd/version.go
|
|
||||||
|
|
||||||
# executables
|
|
||||||
|
|
||||||
influxd
|
|
||||||
**/influxd
|
|
||||||
!**/influxd/
|
|
||||||
|
|
||||||
influx
|
|
||||||
**/influx
|
|
||||||
!**/influx/
|
|
||||||
|
|
||||||
influxdb
|
|
||||||
**/influxdb
|
|
||||||
!**/influxdb/
|
|
||||||
|
|
||||||
/benchmark-tool
|
|
||||||
/main
|
|
||||||
/benchmark-storage
|
|
||||||
godef
|
|
||||||
gosym
|
|
||||||
gocode
|
|
||||||
inspect-raft
|
|
||||||
|
|
||||||
# dependencies
|
|
||||||
out_rpm/
|
|
||||||
packages/
|
|
||||||
|
|
||||||
# autconf
|
|
||||||
autom4te.cache/
|
|
||||||
config.log
|
|
||||||
config.status
|
|
||||||
Makefile
|
|
||||||
|
|
||||||
# log file
|
|
||||||
influxdb.log
|
|
||||||
benchmark.log
|
|
||||||
|
|
||||||
# config file
|
|
||||||
config.toml
|
|
||||||
|
|
||||||
# test data files
|
|
||||||
integration/migration_data/
|
|
||||||
|
|
||||||
# goide project files
|
|
||||||
.idea
|
|
||||||
|
|
||||||
# goconvey config files
|
|
||||||
*.goconvey
|
|
File diff suppressed because it is too large
Load Diff
|
@ -1,231 +0,0 @@
|
||||||
Contributing to InfluxDB
|
|
||||||
========================
|
|
||||||
|
|
||||||
Bug reports
|
|
||||||
---------------
|
|
||||||
Before you file an issue, please search existing issues in case it has already been filed, or perhaps even fixed. If you file an issue, please include the following.
|
|
||||||
* Full details of your operating system (or distribution) e.g. 64-bit Ubuntu 14.04.
|
|
||||||
* The version of InfluxDB you are running
|
|
||||||
* Whether you installed it using a pre-built package, or built it from source.
|
|
||||||
* A small test case, if applicable, that demonstrates the issues.
|
|
||||||
|
|
||||||
Remember the golden rule of bug reports: **The easier you make it for us to reproduce the problem, the faster it will get fixed.**
|
|
||||||
If you have never written a bug report before, or if you want to brush up on your bug reporting skills, we recommend reading [Simon Tatham's essay "How to Report Bugs Effectively."](http://www.chiark.greenend.org.uk/~sgtatham/bugs.html)
|
|
||||||
|
|
||||||
Test cases should be in the form of `curl` commands. For example:
|
|
||||||
```
|
|
||||||
# create database
|
|
||||||
curl -G http://localhost:8086/query --data-urlencode "q=CREATE DATABASE mydb"
|
|
||||||
|
|
||||||
# create retention policy
|
|
||||||
curl -G http://localhost:8086/query --data-urlencode "q=CREATE RETENTION POLICY myrp ON mydb DURATION 365d REPLICATION 1 DEFAULT"
|
|
||||||
|
|
||||||
# write data
|
|
||||||
curl -X POST http://localhost:8086/write --data-urlencode "db=mydb" --data-binary "cpu,region=useast,host=server_1,service=redis value=61"
|
|
||||||
|
|
||||||
# Delete a Measurement
|
|
||||||
curl -G http://localhost:8086/query --data-urlencode 'db=mydb' --data-urlencode 'q=DROP MEASUREMENT cpu'
|
|
||||||
|
|
||||||
# Query the Measurement
|
|
||||||
# Bug: expected it to return no data, but data comes back.
|
|
||||||
curl -G http://localhost:8086/query --data-urlencode 'db=mydb' --data-urlencode 'q=SELECT * from cpu'
|
|
||||||
```
|
|
||||||
**If you don't include a clear test case like this, your issue may not be investigated, and may even be closed**. If writing the data is too difficult, please zip up your data directory and include a link to it in your bug report.
|
|
||||||
|
|
||||||
Please note that issues are *not the place to file general questions* such as "how do I use collectd with InfluxDB?" Questions of this nature should be sent to the [Google Group](https://groups.google.com/forum/#!forum/influxdb), not filed as issues. Issues like this will be closed.
|
|
||||||
|
|
||||||
Feature requests
|
|
||||||
---------------
|
|
||||||
We really like to receive feature requests, as it helps us prioritize our work. Please be clear about your requirements, as incomplete feature requests may simply be closed if we don't understand what you would like to see added to InfluxDB.
|
|
||||||
|
|
||||||
Contributing to the source code
|
|
||||||
---------------
|
|
||||||
|
|
||||||
InfluxDB follows standard Go project structure. This means that all
|
|
||||||
your go development are done in `$GOPATH/src`. GOPATH can be any
|
|
||||||
directory under which InfluxDB and all its dependencies will be
|
|
||||||
cloned. For more details on recommended go project's structure, see
|
|
||||||
[How to Write Go Code](http://golang.org/doc/code.html) and
|
|
||||||
[Go: Best Practices for Production Environments](http://peter.bourgon.org/go-in-production/), or you can just follow
|
|
||||||
the steps below.
|
|
||||||
|
|
||||||
Submitting a pull request
|
|
||||||
------------
|
|
||||||
To submit a pull request you should fork the InfluxDB repository, and make your change on a feature branch of your fork. Then generate a pull request from your branch against *master* of the InfluxDB repository. Include in your pull request details of your change -- the why *and* the how -- as well as the testing your performed. Also, be sure to run the test suite with your change in place. Changes that cause tests to fail cannot be merged.
|
|
||||||
|
|
||||||
There will usually be some back and forth as we finalize the change, but once that completes it may be merged.
|
|
||||||
|
|
||||||
To assist in review for the PR, please add the following to your pull request comment:
|
|
||||||
|
|
||||||
```md
|
|
||||||
- [ ] CHANGELOG.md updated
|
|
||||||
- [ ] Rebased/mergable
|
|
||||||
- [ ] Tests pass
|
|
||||||
- [ ] Sign [CLA](http://influxdb.com/community/cla.html) (if not already signed)
|
|
||||||
```
|
|
||||||
|
|
||||||
Use of third-party packages
|
|
||||||
------------
|
|
||||||
A third-party package is defined as one that is not part of the standard Go distribution. Generally speaking we prefer to minimize our use of third-party packages, and avoid them unless absolutely necessarly. We'll often write a little bit of code rather than pull in a third-party package. Of course, we do use some third-party packages -- most importantly we use [BoltDB](https://github.com/boltdb/bolt) as the storage engine. So to maximise the chance your change will be accepted by us, use only the standard libaries, or the third-party packages we have decided to use.
|
|
||||||
|
|
||||||
For rationale, check out the post [The Case Against Third Party Libraries](http://blog.gopheracademy.com/advent-2014/case-against-3pl/).
|
|
||||||
|
|
||||||
Signing the CLA
|
|
||||||
---------------
|
|
||||||
|
|
||||||
If you are going to be contributing back to InfluxDB please take a
|
|
||||||
second to sign our CLA, which can be found
|
|
||||||
[on our website](http://influxdb.com/community/cla.html).
|
|
||||||
|
|
||||||
Installing Go
|
|
||||||
-------------
|
|
||||||
InfluxDB requires Go 1.4 or greater.
|
|
||||||
|
|
||||||
At InfluxDB we find gvm, a Go version manager, useful for installing Go. For instructions
|
|
||||||
on how to install it see [the gvm page on github](https://github.com/moovweb/gvm).
|
|
||||||
|
|
||||||
After installing gvm you can install and set the default go version by
|
|
||||||
running the following:
|
|
||||||
|
|
||||||
gvm install go1.4
|
|
||||||
gvm use go1.4 --default
|
|
||||||
|
|
||||||
Revision Control Systems
|
|
||||||
------
|
|
||||||
Go has the ability to import remote packages via revision control systems with the `go get` command. To ensure that you can retrieve any remote package, be sure to install the following rcs software to your system.
|
|
||||||
Currently the project only depends on `git` and `mercurial`.
|
|
||||||
|
|
||||||
* [Install Git](http://git-scm.com/book/en/Getting-Started-Installing-Git)
|
|
||||||
* [Install Mercurial](http://mercurial.selenic.com/wiki/Download)
|
|
||||||
|
|
||||||
Project structure
|
|
||||||
-----------------
|
|
||||||
First you need to setup the project structure:
|
|
||||||
|
|
||||||
export GOPATH=$HOME/gocodez
|
|
||||||
mkdir -p $GOPATH/src/github.com/influxdb
|
|
||||||
cd $GOPATH/src/github.com/influxdb
|
|
||||||
git clone git@github.com:influxdb/influxdb
|
|
||||||
|
|
||||||
You can add the line `export GOPATH=$HOME/gocodez` to your bash/zsh
|
|
||||||
file to be set for every shell instead of having to manually run it
|
|
||||||
everytime.
|
|
||||||
|
|
||||||
We have a pre commit hook to make sure code is formatted properly
|
|
||||||
and vetted before you commit any changes. We strongly recommend using the pre
|
|
||||||
commit hook to guard against accidentally committing unformatted
|
|
||||||
code. To use the pre-commit hook, run the following:
|
|
||||||
|
|
||||||
cd $GOPATH/src/github.com/influxdb/influxdb
|
|
||||||
cp .hooks/pre-commit .git/hooks/
|
|
||||||
|
|
||||||
In case the commit is rejected because it's not formatted you can run
|
|
||||||
the following to format the code:
|
|
||||||
|
|
||||||
```
|
|
||||||
go fmt ./...
|
|
||||||
go vet ./...
|
|
||||||
```
|
|
||||||
|
|
||||||
To install go vet, run the following command:
|
|
||||||
```
|
|
||||||
go get golang.org/x/tools/cmd/vet
|
|
||||||
```
|
|
||||||
|
|
||||||
NOTE: If you have not installed mercurial, the above command will fail. See [Revision Control Systems](#revision-control-systems) above.
|
|
||||||
|
|
||||||
For more information on `go vet`, [read the GoDoc](https://godoc.org/golang.org/x/tools/cmd/vet).
|
|
||||||
|
|
||||||
Build and Test
|
|
||||||
-----
|
|
||||||
|
|
||||||
Make sure you have Go installed and the project structure as shown above. To then build the project, execute the following commands:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
cd $GOPATH/src/github.com/influxdb
|
|
||||||
go get -u -f -t ./...
|
|
||||||
go build ./...
|
|
||||||
```
|
|
||||||
|
|
||||||
To then install the binaries, run the following command. They can be found in `$GOPATH/bin`. Please note that the InfluxDB binary is named `influxd`, not `influxdb`.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
go install ./...
|
|
||||||
```
|
|
||||||
|
|
||||||
To set the version and commit flags during the build pass the following to the build command:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
-ldflags="-X main.version $VERSION -X main.branch $BRANCH -X main.commit $COMMIT"
|
|
||||||
```
|
|
||||||
|
|
||||||
where `$VERSION` is the version, `$BRANCH` is the branch, and `$COMMIT` is the git commit hash.
|
|
||||||
|
|
||||||
To run the tests, execute the following command:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
cd $GOPATH/src/github.com/influxdb/influxdb
|
|
||||||
go test -v ./...
|
|
||||||
|
|
||||||
# run tests that match some pattern
|
|
||||||
go test -run=TestDatabase . -v
|
|
||||||
|
|
||||||
# run tests and show coverage
|
|
||||||
go test -coverprofile /tmp/cover . && go tool cover -html /tmp/cover
|
|
||||||
```
|
|
||||||
|
|
||||||
To install go cover, run the following command:
|
|
||||||
```
|
|
||||||
go get golang.org/x/tools/cmd/cover
|
|
||||||
```
|
|
||||||
|
|
||||||
Generated Google Protobuf code
|
|
||||||
-----------------
|
|
||||||
Most changes to the source do not require that the generated protocol buffer code be changed. But if you need to modify the protocol buffer code, you'll first need to install the protocol buffers toolchain.
|
|
||||||
|
|
||||||
First install the [protocol buffer compiler](https://developers.google.com/protocol-buffers/
|
|
||||||
) 2.6.1 or later for your OS:
|
|
||||||
|
|
||||||
Then install the go plugins:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
go get github.com/gogo/protobuf/proto
|
|
||||||
go get github.com/gogo/protobuf/protoc-gen-gogo
|
|
||||||
go get github.com/gogo/protobuf/gogoproto
|
|
||||||
```
|
|
||||||
|
|
||||||
Finally run, `go generate` after updating any `*.proto` file:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
go generate ./...
|
|
||||||
```
|
|
||||||
**Trouleshooting**
|
|
||||||
|
|
||||||
If generating the protobuf code is failing for you, check each of the following:
|
|
||||||
* Ensure the protobuf library can be found. Make sure that `LD_LIBRRARY_PATH` includes the directory in which the library `libprotoc.so` has been installed.
|
|
||||||
* Ensure the command `protoc-gen-gogo`, found in `GOPATH/bin`, is on your path. This can be done by adding `GOPATH/bin` to `PATH`.
|
|
||||||
|
|
||||||
Profiling
|
|
||||||
-----
|
|
||||||
When troubleshooting problems with CPU or memory the Go toolchain can be helpful. You can start InfluxDB with CPU or memory profiling turned on. For example:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
# start influx with profiling
|
|
||||||
./influxd -cpuprofile influxd.prof
|
|
||||||
# run queries, writes, whatever you're testing
|
|
||||||
# open up pprof
|
|
||||||
go tool pprof influxd influxd.prof
|
|
||||||
# once inside run "web", opens up browser with the CPU graph
|
|
||||||
# can also run "web <function name>" to zoom in. Or "list <function name>" to see specific lines
|
|
||||||
```
|
|
||||||
|
|
||||||
Continuous Integration testing
|
|
||||||
-----
|
|
||||||
InfluxDB uses CirceCI for continuous integration testing. To see how the code is built and tested, check out [this file](https://github.com/influxdb/influxdb/blob/master/circle-test.sh). It closely follows the build and test process outlined above. You can see the exact version of Go InfluxDB uses for testing by consulting that file.
|
|
||||||
|
|
||||||
Useful links
|
|
||||||
------------
|
|
||||||
- [Useful techniques in Go](http://arslan.io/ten-useful-techniques-in-go)
|
|
||||||
- [Go in production](http://peter.bourgon.org/go-in-production/)
|
|
||||||
- [Principles of designing Go APIs with channels](https://inconshreveable.com/07-08-2014/principles-of-designing-go-apis-with-channels/)
|
|
||||||
- [Common mistakes in Golang](http://soryy.com/blog/2014/common-mistakes-with-go-lang/). Especially this section `Loops, Closures, and Local Variables`
|
|
|
@ -1,44 +0,0 @@
|
||||||
# Docker Setup
|
|
||||||
========================
|
|
||||||
|
|
||||||
This document describes how to build and run a minimal InfluxDB container under Docker. Currently, it has only been tested for local development and assumes that you have a working docker environment.
|
|
||||||
|
|
||||||
## Building Image
|
|
||||||
|
|
||||||
To build a docker image for InfluxDB from your current checkout, run the following:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ ./build-docker.sh
|
|
||||||
```
|
|
||||||
|
|
||||||
This script uses the `golang:1.5` image to build a fully static binary of `influxd` and then adds it to a minimal `scratch` image.
|
|
||||||
|
|
||||||
To build the image using a different version of go:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ GO_VER=1.4.2 ./build-docker.sh
|
|
||||||
```
|
|
||||||
|
|
||||||
Available version can be found [here](https://hub.docker.com/_/golang/).
|
|
||||||
|
|
||||||
## Single Node Container
|
|
||||||
|
|
||||||
This will start an interactive, single-node, that publishes the containers port `8086` and `8088` to the hosts ports `8086` and `8088` respectively. This is identical to starting `influxd` manually.
|
|
||||||
|
|
||||||
```
|
|
||||||
$ docker run -it -p 8086:8086 -p 8088:8088 influxdb
|
|
||||||
```
|
|
||||||
|
|
||||||
## Multi-Node Cluster
|
|
||||||
|
|
||||||
This will create a simple 3-node cluster. The data is stored within the container and will be lost when the container is removed. This is only useful for test clusters.
|
|
||||||
|
|
||||||
The `HOST_IP` env variable should be your host IP if running under linux or the virtualbox VM IP if running under OSX. On OSX, this would be something like: `$(docker-machine ip dev)` or `$(boot2docker ip)` depending on which docker tool you are using.
|
|
||||||
|
|
||||||
```
|
|
||||||
$ export HOST_IP=<your host/VM IP>
|
|
||||||
$ docker run -it -p 8086:8088 -p 8088:8088 influxdb -hostname $HOST_IP:8088
|
|
||||||
$ docker run -it -p 8186:8088 -p 8188:8088 influxdb -hostname $HOST_IP:8188 -join $HOST_IP:8088
|
|
||||||
$ docker run -it -p 8286:8088 -p 8288:8088 influxdb -hostname $HOST_IP:8288 -join $HOST_IP:8088
|
|
||||||
```
|
|
||||||
|
|
|
@ -1,24 +0,0 @@
|
||||||
FROM busybox:ubuntu-14.04
|
|
||||||
|
|
||||||
MAINTAINER Jason Wilder "<jason@influxdb.com>"
|
|
||||||
|
|
||||||
# admin, http, udp, cluster, graphite, opentsdb, collectd
|
|
||||||
EXPOSE 8083 8086 8086/udp 8088 2003 4242 25826
|
|
||||||
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
# copy binary into image
|
|
||||||
COPY influxd /app/
|
|
||||||
|
|
||||||
# Add influxd to the PATH
|
|
||||||
ENV PATH=/app:$PATH
|
|
||||||
|
|
||||||
# Generate a default config
|
|
||||||
RUN influxd config > /etc/influxdb.toml
|
|
||||||
|
|
||||||
# Use /data for all disk storage
|
|
||||||
RUN sed -i 's/dir = "\/.*influxdb/dir = "\/data/' /etc/influxdb.toml
|
|
||||||
|
|
||||||
VOLUME ["/data"]
|
|
||||||
|
|
||||||
ENTRYPOINT ["influxd", "--config", "/etc/influxdb.toml"]
|
|
|
@ -1,20 +0,0 @@
|
||||||
The MIT License (MIT)
|
|
||||||
|
|
||||||
Copyright (c) 2013-2015 Errplane Inc.
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
|
||||||
this software and associated documentation files (the "Software"), to deal in
|
|
||||||
the Software without restriction, including without limitation the rights to
|
|
||||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
|
||||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
|
||||||
subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
|
||||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
|
||||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
|
||||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
|
||||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
|
@ -1,180 +0,0 @@
|
||||||
The top level name is called a measurement. These names can contain any characters. Then there are field names, field values, tag keys and tag values, which can also contain any characters. However, if the measurement, field, or tag contains any character other than [A-Z,a-z,0-9,_], or if it starts with a digit, it must be double-quoted. Therefore anywhere a measurement name, field name, field value, tag name, or tag value appears it should be wrapped in double quotes.
|
|
||||||
|
|
||||||
# Databases & retention policies
|
|
||||||
|
|
||||||
```sql
|
|
||||||
-- create a database
|
|
||||||
CREATE DATABASE <name>
|
|
||||||
|
|
||||||
-- create a retention policy
|
|
||||||
CREATE RETENTION POLICY <rp-name> ON <db-name> DURATION <duration> REPLICATION <n> [DEFAULT]
|
|
||||||
|
|
||||||
-- alter retention policy
|
|
||||||
ALTER RETENTION POLICY <rp-name> ON <db-name> (DURATION <duration> | REPLICATION <n> | DEFAULT)+
|
|
||||||
|
|
||||||
-- drop a database
|
|
||||||
DROP DATABASE <name>
|
|
||||||
|
|
||||||
-- drop a retention policy
|
|
||||||
DROP RETENTION POLICY <rp-name> ON <db-name>
|
|
||||||
```
|
|
||||||
where `<duration>` is either `INF` for infinite retention, or an integer followed by the desired unit of time: u,ms,s,m,h,d,w for microseconds, milliseconds, seconds, minutes, hours, days, or weeks, respectively. `<replication>` must be an integer.
|
|
||||||
|
|
||||||
If present, `DEFAULT` sets the retention policy as the default retention policy for writes and reads.
|
|
||||||
|
|
||||||
# Users and permissions
|
|
||||||
|
|
||||||
```sql
|
|
||||||
-- create user
|
|
||||||
CREATE USER <name> WITH PASSWORD '<password>'
|
|
||||||
|
|
||||||
-- grant privilege on a database
|
|
||||||
GRANT <privilege> ON <db> TO <user>
|
|
||||||
|
|
||||||
-- grant cluster admin privileges
|
|
||||||
GRANT ALL [PRIVILEGES] TO <user>
|
|
||||||
|
|
||||||
-- revoke privilege
|
|
||||||
REVOKE <privilege> ON <db> FROM <user>
|
|
||||||
|
|
||||||
-- revoke all privileges for a DB
|
|
||||||
REVOKE ALL [PRIVILEGES] ON <db> FROM <user>
|
|
||||||
|
|
||||||
-- revoke all privileges including cluster admin
|
|
||||||
REVOKE ALL [PRIVILEGES] FROM <user>
|
|
||||||
|
|
||||||
-- combine db creation with privilege assignment (user must already exist)
|
|
||||||
CREATE DATABASE <name> GRANT <privilege> TO <user>
|
|
||||||
CREATE DATABASE <name> REVOKE <privilege> FROM <user>
|
|
||||||
|
|
||||||
-- delete a user
|
|
||||||
DROP USER <name>
|
|
||||||
|
|
||||||
|
|
||||||
```
|
|
||||||
where `<privilege> := READ | WRITE | All `.
|
|
||||||
|
|
||||||
Authentication must be enabled in the influxdb.conf file for user permissions to be in effect.
|
|
||||||
|
|
||||||
By default, newly created users have no privileges to any databases.
|
|
||||||
|
|
||||||
Cluster administration privileges automatically grant full read and write permissions to all databases, regardless of subsequent database-specific privilege revocation statements.
|
|
||||||
|
|
||||||
# Select
|
|
||||||
|
|
||||||
```sql
|
|
||||||
SELECT mean(value) from cpu WHERE host = 'serverA' AND time > now() - 4h GROUP BY time(5m)
|
|
||||||
|
|
||||||
SELECT mean(value) from cpu WHERE time > now() - 4h GROUP BY time(5m), region
|
|
||||||
```
|
|
||||||
|
|
||||||
## Group By
|
|
||||||
|
|
||||||
# Delete
|
|
||||||
|
|
||||||
# Series
|
|
||||||
|
|
||||||
## Destroy
|
|
||||||
|
|
||||||
```sql
|
|
||||||
DROP MEASUREMENT <name>
|
|
||||||
DROP MEASUREMENT cpu WHERE region = 'uswest'
|
|
||||||
```
|
|
||||||
|
|
||||||
## Show
|
|
||||||
|
|
||||||
Show series queries are for pulling out individual series from measurement names and tag data. They're useful for discovery.
|
|
||||||
|
|
||||||
```sql
|
|
||||||
-- show all databases
|
|
||||||
SHOW DATABASES
|
|
||||||
|
|
||||||
-- show measurement names
|
|
||||||
SHOW MEASUREMENTS
|
|
||||||
SHOW MEASUREMENTS LIMIT 15
|
|
||||||
SHOW MEASUREMENTS LIMIT 10 OFFSET 40
|
|
||||||
SHOW MEASUREMENTS WHERE service = 'redis'
|
|
||||||
-- LIMIT and OFFSET can be applied to any of the SHOW type queries
|
|
||||||
|
|
||||||
-- show all series across all measurements/tagsets
|
|
||||||
SHOW SERIES
|
|
||||||
|
|
||||||
-- get a show of all series for any measurements where tag key region = tak value 'uswest'
|
|
||||||
SHOW SERIES WHERE region = 'uswest'
|
|
||||||
|
|
||||||
SHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10
|
|
||||||
|
|
||||||
-- returns the 100 - 109 rows in the result. In the case of SHOW SERIES, which returns
|
|
||||||
-- series split into measurements. Each series counts as a row. So you could see only a
|
|
||||||
-- single measurement returned, but 10 series within it.
|
|
||||||
SHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10 OFFSET 100
|
|
||||||
|
|
||||||
-- show all retention policies on a database
|
|
||||||
SHOW RETENTION POLICIES ON mydb
|
|
||||||
|
|
||||||
-- get a show of all tag keys across all measurements
|
|
||||||
SHOW TAG KEYS
|
|
||||||
|
|
||||||
-- show all the tag keys for a given measurement
|
|
||||||
SHOW TAG KEYS FROM cpu
|
|
||||||
SHOW TAG KEYS FROM temperature, wind_speed
|
|
||||||
|
|
||||||
-- show all the tag values. note that a single WHERE TAG KEY = '...' clause is required
|
|
||||||
SHOW TAG VALUES WITH TAG KEY = 'region'
|
|
||||||
SHOW TAG VALUES FROM cpu WHERE region = 'uswest' WITH TAG KEY = 'host'
|
|
||||||
|
|
||||||
-- and you can do stuff against fields
|
|
||||||
SHOW FIELD KEYS FROM cpu
|
|
||||||
|
|
||||||
-- but you can't do this
|
|
||||||
SHOW FIELD VALUES
|
|
||||||
-- we don't index field values, so this query should be invalid.
|
|
||||||
|
|
||||||
-- show all users
|
|
||||||
SHOW USERS
|
|
||||||
```
|
|
||||||
|
|
||||||
Note that `FROM` and `WHERE` are optional clauses in most of the show series queries.
|
|
||||||
|
|
||||||
And the show series output looks like this:
|
|
||||||
|
|
||||||
```json
|
|
||||||
[
|
|
||||||
{
|
|
||||||
"name": "cpu",
|
|
||||||
"columns": ["id", "region", "host"],
|
|
||||||
"values": [
|
|
||||||
1, "uswest", "servera",
|
|
||||||
2, "uswest", "serverb"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "reponse_time",
|
|
||||||
"columns": ["id", "application", "host"],
|
|
||||||
"values": [
|
|
||||||
3, "myRailsApp", "servera"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
# Continuous Queries
|
|
||||||
|
|
||||||
Continous queries are going to be inspired by MySQL `TRIGGER` syntax:
|
|
||||||
|
|
||||||
http://dev.mysql.com/doc/refman/5.0/en/trigger-syntax.html
|
|
||||||
|
|
||||||
Instead of having automatically-assigned ids, named continuous queries allows for some level of duplication prevention,
|
|
||||||
particularly in the case where creation is scripted.
|
|
||||||
|
|
||||||
## Create
|
|
||||||
|
|
||||||
CREATE CONTINUOUS QUERY <name> AS SELECT ... FROM ...
|
|
||||||
|
|
||||||
## Destroy
|
|
||||||
|
|
||||||
DROP CONTINUOUS QUERY <name>
|
|
||||||
|
|
||||||
## List
|
|
||||||
|
|
||||||
SHOW CONTINUOUS QUERIES
|
|
|
@ -1,71 +0,0 @@
|
||||||
# InfluxDB [![Circle CI](https://circleci.com/gh/influxdb/influxdb/tree/master.svg?style=svg)](https://circleci.com/gh/influxdb/influxdb/tree/master)
|
|
||||||
|
|
||||||
## An Open-Source, Distributed, Time Series Database
|
|
||||||
|
|
||||||
> InfluxDB v0.9.0 is now out. Going forward, the 0.9.x series of releases will not make breaking API changes or breaking changes to the underlying data storage. However, 0.9.0 clustering should be considered an alpha release.
|
|
||||||
|
|
||||||
InfluxDB is an open source **distributed time series database** with
|
|
||||||
**no external dependencies**. It's useful for recording metrics,
|
|
||||||
events, and performing analytics.
|
|
||||||
|
|
||||||
## Features
|
|
||||||
|
|
||||||
* Built-in [HTTP API](http://influxdb.com/docs/v0.9/concepts/reading_and_writing_data.html) so you don't have to write any server side code to get up and running.
|
|
||||||
* Data can be tagged, allowing very flexible querying.
|
|
||||||
* SQL-like query language.
|
|
||||||
* Clustering is supported out of the box, so that you can scale horizontally to handle your data.
|
|
||||||
* Simple to install and manage, and fast to get data in and out.
|
|
||||||
* It aims to answer queries in real-time. That means every data point is
|
|
||||||
indexed as it comes in and is immediately available in queries that
|
|
||||||
should return in < 100ms.
|
|
||||||
|
|
||||||
## Getting Started
|
|
||||||
*The following directions apply only to the 0.9.0 release or building from the source on master.*
|
|
||||||
|
|
||||||
### Building
|
|
||||||
|
|
||||||
You don't need to build the project to use it - you can use any of our
|
|
||||||
[pre-built packages](http://influxdb.com/download/index.html) to install InfluxDB. That's
|
|
||||||
the recommended way to get it running. However, if you want to contribute to the core of InfluxDB, you'll need to build.
|
|
||||||
For those adventurous enough, you can
|
|
||||||
[follow along on our docs](http://github.com/influxdb/influxdb/blob/master/CONTRIBUTING.md).
|
|
||||||
|
|
||||||
### Starting InfluxDB
|
|
||||||
* `service influxdb start` if you have installed InfluxDB using an official Debian or RPM package.
|
|
||||||
* `$GOPATH/bin/influxd` if you have built InfluxDB from source.
|
|
||||||
|
|
||||||
### Creating your first database
|
|
||||||
|
|
||||||
```
|
|
||||||
curl -G 'http://localhost:8086/query' --data-urlencode "q=CREATE DATABASE mydb"
|
|
||||||
```
|
|
||||||
|
|
||||||
### Insert some data
|
|
||||||
```
|
|
||||||
curl -XPOST 'http://localhost:8086/write?db=mydb' \
|
|
||||||
-d 'cpu,host=server01,region=uswest load=42 1434055562000000000'
|
|
||||||
|
|
||||||
curl -XPOST 'http://localhost:8086/write?db=mydb' \
|
|
||||||
-d 'cpu,host=server02,region=uswest load=78 1434055562000000000'
|
|
||||||
|
|
||||||
curl -XPOST 'http://localhost:8086/write?db=mydb' \
|
|
||||||
-d 'cpu,host=server03,region=useast load=15.4 1434055562000000000'
|
|
||||||
```
|
|
||||||
|
|
||||||
### Query for the data
|
|
||||||
```JSON
|
|
||||||
curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=mydb" \
|
|
||||||
--data-urlencode "q=SELECT * FROM cpu WHERE host='server01' AND time < now - 1d"
|
|
||||||
```
|
|
||||||
|
|
||||||
### Analyze the data
|
|
||||||
```JSON
|
|
||||||
curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=mydb" \
|
|
||||||
--data-urlencode "q=SELECT mean(load) FROM cpu WHERE region='uswest'"
|
|
||||||
```
|
|
||||||
|
|
||||||
## Helpful Links
|
|
||||||
|
|
||||||
* Understand the [design goals and motivations of the project](http://influxdb.com/docs/v0.9/introduction/overview.html).
|
|
||||||
* Follow the [getting started guide](http://influxdb.com/docs/v0.9/introduction/getting_started.html) to find out how to install InfluxDB, start writing more data, and issue more queries - in just a few minutes.
|
|
||||||
* See the [HTTP API documentation to start writing a library for your favorite language](http://influxdb.com/docs/v0.9/concepts/reading_and_writing_data.html).
|
|
|
@ -1,78 +0,0 @@
|
||||||
package influxdb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math/rand"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/meta"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Balancer represents a load-balancing algorithm for a set of nodes
|
|
||||||
type Balancer interface {
|
|
||||||
// Next returns the next Node according to the balancing method
|
|
||||||
// or nil if there are no nodes available
|
|
||||||
Next() *meta.NodeInfo
|
|
||||||
}
|
|
||||||
|
|
||||||
type nodeBalancer struct {
|
|
||||||
nodes []meta.NodeInfo // data nodes to balance between
|
|
||||||
p int // current node index
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewNodeBalancer create a shuffled, round-robin balancer so that
|
|
||||||
// multiple instances will return nodes in randomized order and each
|
|
||||||
// each returned node will be repeated in a cycle
|
|
||||||
func NewNodeBalancer(nodes []meta.NodeInfo) Balancer {
|
|
||||||
// make a copy of the node slice so we can randomize it
|
|
||||||
// without affecting the original instance as well as ensure
|
|
||||||
// that each Balancer returns nodes in a different order
|
|
||||||
b := &nodeBalancer{}
|
|
||||||
|
|
||||||
b.nodes = make([]meta.NodeInfo, len(nodes))
|
|
||||||
copy(b.nodes, nodes)
|
|
||||||
|
|
||||||
b.shuffle()
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// shuffle randomizes the ordering the balancers available nodes
|
|
||||||
func (b *nodeBalancer) shuffle() {
|
|
||||||
for i := range b.nodes {
|
|
||||||
j := rand.Intn(i + 1)
|
|
||||||
b.nodes[i], b.nodes[j] = b.nodes[j], b.nodes[i]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// online returns a slice of the nodes that are online
|
|
||||||
func (b *nodeBalancer) online() []meta.NodeInfo {
|
|
||||||
return b.nodes
|
|
||||||
// now := time.Now().UTC()
|
|
||||||
// up := []meta.NodeInfo{}
|
|
||||||
// for _, n := range b.nodes {
|
|
||||||
// if n.OfflineUntil.After(now) {
|
|
||||||
// continue
|
|
||||||
// }
|
|
||||||
// up = append(up, n)
|
|
||||||
// }
|
|
||||||
// return up
|
|
||||||
}
|
|
||||||
|
|
||||||
// Next returns the next available nodes
|
|
||||||
func (b *nodeBalancer) Next() *meta.NodeInfo {
|
|
||||||
// only use online nodes
|
|
||||||
up := b.online()
|
|
||||||
|
|
||||||
// no nodes online
|
|
||||||
if len(up) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// rollover back to the beginning
|
|
||||||
if b.p >= len(up) {
|
|
||||||
b.p = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
d := &up[b.p]
|
|
||||||
b.p += 1
|
|
||||||
|
|
||||||
return d
|
|
||||||
}
|
|
|
@ -1,115 +0,0 @@
|
||||||
package influxdb_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb"
|
|
||||||
"github.com/influxdb/influxdb/meta"
|
|
||||||
)
|
|
||||||
|
|
||||||
func NewNodes() []meta.NodeInfo {
|
|
||||||
var nodes []meta.NodeInfo
|
|
||||||
for i := 1; i <= 2; i++ {
|
|
||||||
nodes = append(nodes, meta.NodeInfo{
|
|
||||||
ID: uint64(i),
|
|
||||||
Host: fmt.Sprintf("localhost:999%d", i),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return nodes
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBalancerEmptyNodes(t *testing.T) {
|
|
||||||
b := influxdb.NewNodeBalancer([]meta.NodeInfo{})
|
|
||||||
got := b.Next()
|
|
||||||
if got != nil {
|
|
||||||
t.Errorf("expected nil, got %v", got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBalancerUp(t *testing.T) {
|
|
||||||
nodes := NewNodes()
|
|
||||||
b := influxdb.NewNodeBalancer(nodes)
|
|
||||||
|
|
||||||
// First node in randomized round-robin order
|
|
||||||
first := b.Next()
|
|
||||||
if first == nil {
|
|
||||||
t.Errorf("expected datanode, got %v", first)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Second node in randomized round-robin order
|
|
||||||
second := b.Next()
|
|
||||||
if second == nil {
|
|
||||||
t.Errorf("expected datanode, got %v", second)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Should never get the same node in order twice
|
|
||||||
if first.ID == second.ID {
|
|
||||||
t.Errorf("expected first != second. got %v = %v", first.ID, second.ID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
func TestBalancerDown(t *testing.T) {
|
|
||||||
nodes := NewNodes()
|
|
||||||
b := influxdb.NewNodeBalancer(nodes)
|
|
||||||
|
|
||||||
nodes[0].Down()
|
|
||||||
|
|
||||||
// First node in randomized round-robin order
|
|
||||||
first := b.Next()
|
|
||||||
if first == nil {
|
|
||||||
t.Errorf("expected datanode, got %v", first)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Second node should rollover to the first up node
|
|
||||||
second := b.Next()
|
|
||||||
if second == nil {
|
|
||||||
t.Errorf("expected datanode, got %v", second)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Health node should be returned each time
|
|
||||||
if first.ID != 2 && first.ID != second.ID {
|
|
||||||
t.Errorf("expected first != second. got %v = %v", first.ID, second.ID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
func TestBalancerBackUp(t *testing.T) {
|
|
||||||
nodes := newDataNodes()
|
|
||||||
b := influxdb.NewNodeBalancer(nodes)
|
|
||||||
|
|
||||||
nodes[0].Down()
|
|
||||||
|
|
||||||
for i := 0; i < 3; i++ {
|
|
||||||
got := b.Next()
|
|
||||||
if got == nil {
|
|
||||||
t.Errorf("expected datanode, got %v", got)
|
|
||||||
}
|
|
||||||
|
|
||||||
if exp := uint64(2); got.ID != exp {
|
|
||||||
t.Errorf("wrong node id: exp %v, got %v", exp, got.ID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
nodes[0].Up()
|
|
||||||
|
|
||||||
// First node in randomized round-robin order
|
|
||||||
first := b.Next()
|
|
||||||
if first == nil {
|
|
||||||
t.Errorf("expected datanode, got %v", first)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Second node should rollover to the first up node
|
|
||||||
second := b.Next()
|
|
||||||
if second == nil {
|
|
||||||
t.Errorf("expected datanode, got %v", second)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Should get both nodes returned
|
|
||||||
if first.ID == second.ID {
|
|
||||||
t.Errorf("expected first != second. got %v = %v", first.ID, second.ID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
*/
|
|
|
@ -1,7 +0,0 @@
|
||||||
#!/bin/sh -x -e
|
|
||||||
|
|
||||||
GO_VER=${GO_VER:-1.5}
|
|
||||||
|
|
||||||
docker run -it -v "$GOPATH":/gopath -v "$(pwd)":/app -e "GOPATH=/gopath" -w /app golang:$GO_VER sh -c 'CGO_ENABLED=0 go build -a --installsuffix cgo --ldflags="-s" -o influxd ./cmd/influxd'
|
|
||||||
|
|
||||||
docker build -t influxdb .
|
|
|
@ -1,63 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
#
|
|
||||||
# This is the InfluxDB CircleCI test script. Using this script allows total control
|
|
||||||
# the environment in which the build and test is run, and matches the official
|
|
||||||
# build process for InfluxDB.
|
|
||||||
|
|
||||||
BUILD_DIR=$HOME/influxdb-build
|
|
||||||
GO_VERSION=go1.4.2
|
|
||||||
PARALLELISM="-parallel 256"
|
|
||||||
TIMEOUT="-timeout 480s"
|
|
||||||
|
|
||||||
# Executes the given statement, and exits if the command returns a non-zero code.
|
|
||||||
function exit_if_fail {
|
|
||||||
command=$@
|
|
||||||
echo "Executing '$command'"
|
|
||||||
$command
|
|
||||||
rc=$?
|
|
||||||
if [ $rc -ne 0 ]; then
|
|
||||||
echo "'$command' returned $rc."
|
|
||||||
exit $rc
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
source $HOME/.gvm/scripts/gvm
|
|
||||||
exit_if_fail gvm use $GO_VERSION
|
|
||||||
|
|
||||||
# Set up the build directory, and then GOPATH.
|
|
||||||
exit_if_fail mkdir $BUILD_DIR
|
|
||||||
export GOPATH=$BUILD_DIR
|
|
||||||
exit_if_fail mkdir -p $GOPATH/src/github.com/influxdb
|
|
||||||
|
|
||||||
# Dump some test config to the log.
|
|
||||||
echo "Test configuration"
|
|
||||||
echo "========================================"
|
|
||||||
echo "\$HOME: $HOME"
|
|
||||||
echo "\$GOPATH: $GOPATH"
|
|
||||||
echo "\$CIRCLE_BRANCH: $CIRCLE_BRANCH"
|
|
||||||
|
|
||||||
# Move the checked-out source to a better location.
|
|
||||||
exit_if_fail mv $HOME/influxdb $GOPATH/src/github.com/influxdb
|
|
||||||
exit_if_fail cd $GOPATH/src/github.com/influxdb/influxdb
|
|
||||||
exit_if_fail git branch --set-upstream-to=origin/$CIRCLE_BRANCH $CIRCLE_BRANCH
|
|
||||||
|
|
||||||
# Install the code.
|
|
||||||
exit_if_fail cd $GOPATH/src/github.com/influxdb/influxdb
|
|
||||||
exit_if_fail go get -t -d -v ./...
|
|
||||||
exit_if_fail git checkout $CIRCLE_BRANCH # 'go get' switches to master. Who knew? Switch back.
|
|
||||||
exit_if_fail go build -v ./...
|
|
||||||
|
|
||||||
# Run the tests.
|
|
||||||
exit_if_fail go tool vet --composites=false .
|
|
||||||
case $CIRCLE_NODE_INDEX in
|
|
||||||
0)
|
|
||||||
go test $PARALLELISM $TIMEOUT -v ./... 2>&1 | tee $CIRCLE_ARTIFACTS/test_logs.txt
|
|
||||||
rc=${PIPESTATUS[0]}
|
|
||||||
;;
|
|
||||||
1)
|
|
||||||
GORACE="halt_on_error=1" go test $PARALLELISM $TIMEOUT -v -race ./... 2>&1 | tee $CIRCLE_ARTIFACTS/test_logs_race.txt
|
|
||||||
rc=${PIPESTATUS[0]}
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
exit $rc
|
|
|
@ -1,12 +0,0 @@
|
||||||
machine:
|
|
||||||
pre:
|
|
||||||
- bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/master/binscripts/gvm-installer)
|
|
||||||
- source $HOME/.gvm/scripts/gvm; gvm install go1.4.2 --binary
|
|
||||||
|
|
||||||
dependencies:
|
|
||||||
override:
|
|
||||||
- echo "Dummy override, so no Circle dependencies execute"
|
|
||||||
test:
|
|
||||||
override:
|
|
||||||
- bash circle-test.sh:
|
|
||||||
parallel: true
|
|
|
@ -1,57 +0,0 @@
|
||||||
package cluster
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"gopkg.in/fatih/pool.v2"
|
|
||||||
)
|
|
||||||
|
|
||||||
type clientPool struct {
|
|
||||||
mu sync.RWMutex
|
|
||||||
pool map[uint64]pool.Pool
|
|
||||||
}
|
|
||||||
|
|
||||||
func newClientPool() *clientPool {
|
|
||||||
return &clientPool{
|
|
||||||
pool: make(map[uint64]pool.Pool),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *clientPool) setPool(nodeID uint64, p pool.Pool) {
|
|
||||||
c.mu.Lock()
|
|
||||||
c.pool[nodeID] = p
|
|
||||||
c.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *clientPool) getPool(nodeID uint64) (pool.Pool, bool) {
|
|
||||||
c.mu.RLock()
|
|
||||||
p, ok := c.pool[nodeID]
|
|
||||||
c.mu.RUnlock()
|
|
||||||
return p, ok
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *clientPool) size() int {
|
|
||||||
c.mu.RLock()
|
|
||||||
var size int
|
|
||||||
for _, p := range c.pool {
|
|
||||||
size += p.Len()
|
|
||||||
}
|
|
||||||
c.mu.RUnlock()
|
|
||||||
return size
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *clientPool) conn(nodeID uint64) (net.Conn, error) {
|
|
||||||
c.mu.RLock()
|
|
||||||
conn, err := c.pool[nodeID].Get()
|
|
||||||
c.mu.RUnlock()
|
|
||||||
return conn, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *clientPool) close() {
|
|
||||||
c.mu.Lock()
|
|
||||||
for _, p := range c.pool {
|
|
||||||
p.Close()
|
|
||||||
}
|
|
||||||
c.mu.Unlock()
|
|
||||||
}
|
|
|
@ -1,35 +0,0 @@
|
||||||
package cluster
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/toml"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// DefaultWriteTimeout is the default timeout for a complete write to succeed.
|
|
||||||
DefaultWriteTimeout = 5 * time.Second
|
|
||||||
|
|
||||||
// DefaultShardWriterTimeout is the default timeout set on shard writers.
|
|
||||||
DefaultShardWriterTimeout = 5 * time.Second
|
|
||||||
|
|
||||||
// DefaultShardMapperTimeout is the default timeout set on shard mappers.
|
|
||||||
DefaultShardMapperTimeout = 5 * time.Second
|
|
||||||
)
|
|
||||||
|
|
||||||
// Config represents the configuration for the clustering service.
|
|
||||||
type Config struct {
|
|
||||||
ForceRemoteShardMapping bool `toml:"force-remote-mapping"`
|
|
||||||
WriteTimeout toml.Duration `toml:"write-timeout"`
|
|
||||||
ShardWriterTimeout toml.Duration `toml:"shard-writer-timeout"`
|
|
||||||
ShardMapperTimeout toml.Duration `toml:"shard-mapper-timeout"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewConfig returns an instance of Config with defaults.
|
|
||||||
func NewConfig() Config {
|
|
||||||
return Config{
|
|
||||||
WriteTimeout: toml.Duration(DefaultWriteTimeout),
|
|
||||||
ShardWriterTimeout: toml.Duration(DefaultShardWriterTimeout),
|
|
||||||
ShardMapperTimeout: toml.Duration(DefaultShardMapperTimeout),
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,27 +0,0 @@
|
||||||
package cluster_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/BurntSushi/toml"
|
|
||||||
"github.com/influxdb/influxdb/cluster"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestConfig_Parse(t *testing.T) {
|
|
||||||
// Parse configuration.
|
|
||||||
var c cluster.Config
|
|
||||||
if _, err := toml.Decode(`
|
|
||||||
shard-writer-timeout = "10s"
|
|
||||||
write-timeout = "20s"
|
|
||||||
`, &c); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate configuration.
|
|
||||||
if time.Duration(c.ShardWriterTimeout) != 10*time.Second {
|
|
||||||
t.Fatalf("unexpected shard-writer timeout: %s", c.ShardWriterTimeout)
|
|
||||||
} else if time.Duration(c.WriteTimeout) != 20*time.Second {
|
|
||||||
t.Fatalf("unexpected write timeout s: %s", c.WriteTimeout)
|
|
||||||
}
|
|
||||||
}
|
|
286
Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.pb.go
generated
vendored
286
Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.pb.go
generated
vendored
|
@ -1,286 +0,0 @@
|
||||||
// Code generated by protoc-gen-gogo.
|
|
||||||
// source: internal/data.proto
|
|
||||||
// DO NOT EDIT!
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package internal is a generated protocol buffer package.
|
|
||||||
|
|
||||||
It is generated from these files:
|
|
||||||
internal/data.proto
|
|
||||||
|
|
||||||
It has these top-level messages:
|
|
||||||
WriteShardRequest
|
|
||||||
Field
|
|
||||||
Tag
|
|
||||||
Point
|
|
||||||
WriteShardResponse
|
|
||||||
MapShardRequest
|
|
||||||
MapShardResponse
|
|
||||||
*/
|
|
||||||
package internal
|
|
||||||
|
|
||||||
import proto "github.com/gogo/protobuf/proto"
|
|
||||||
import math "math"
|
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
|
||||||
var _ = proto.Marshal
|
|
||||||
var _ = math.Inf
|
|
||||||
|
|
||||||
type WriteShardRequest struct {
|
|
||||||
ShardID *uint64 `protobuf:"varint,1,req" json:"ShardID,omitempty"`
|
|
||||||
Points []*Point `protobuf:"bytes,2,rep" json:"Points,omitempty"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *WriteShardRequest) Reset() { *m = WriteShardRequest{} }
|
|
||||||
func (m *WriteShardRequest) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*WriteShardRequest) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (m *WriteShardRequest) GetShardID() uint64 {
|
|
||||||
if m != nil && m.ShardID != nil {
|
|
||||||
return *m.ShardID
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *WriteShardRequest) GetPoints() []*Point {
|
|
||||||
if m != nil {
|
|
||||||
return m.Points
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type Field struct {
|
|
||||||
Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
|
|
||||||
Int32 *int32 `protobuf:"varint,2,opt" json:"Int32,omitempty"`
|
|
||||||
Int64 *int64 `protobuf:"varint,3,opt" json:"Int64,omitempty"`
|
|
||||||
Float64 *float64 `protobuf:"fixed64,4,opt" json:"Float64,omitempty"`
|
|
||||||
Bool *bool `protobuf:"varint,5,opt" json:"Bool,omitempty"`
|
|
||||||
String_ *string `protobuf:"bytes,6,opt" json:"String,omitempty"`
|
|
||||||
Bytes []byte `protobuf:"bytes,7,opt" json:"Bytes,omitempty"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Field) Reset() { *m = Field{} }
|
|
||||||
func (m *Field) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*Field) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (m *Field) GetName() string {
|
|
||||||
if m != nil && m.Name != nil {
|
|
||||||
return *m.Name
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Field) GetInt32() int32 {
|
|
||||||
if m != nil && m.Int32 != nil {
|
|
||||||
return *m.Int32
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Field) GetInt64() int64 {
|
|
||||||
if m != nil && m.Int64 != nil {
|
|
||||||
return *m.Int64
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Field) GetFloat64() float64 {
|
|
||||||
if m != nil && m.Float64 != nil {
|
|
||||||
return *m.Float64
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Field) GetBool() bool {
|
|
||||||
if m != nil && m.Bool != nil {
|
|
||||||
return *m.Bool
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Field) GetString_() string {
|
|
||||||
if m != nil && m.String_ != nil {
|
|
||||||
return *m.String_
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Field) GetBytes() []byte {
|
|
||||||
if m != nil {
|
|
||||||
return m.Bytes
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type Tag struct {
|
|
||||||
Key *string `protobuf:"bytes,1,req" json:"Key,omitempty"`
|
|
||||||
Value *string `protobuf:"bytes,2,req" json:"Value,omitempty"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Tag) Reset() { *m = Tag{} }
|
|
||||||
func (m *Tag) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*Tag) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (m *Tag) GetKey() string {
|
|
||||||
if m != nil && m.Key != nil {
|
|
||||||
return *m.Key
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Tag) GetValue() string {
|
|
||||||
if m != nil && m.Value != nil {
|
|
||||||
return *m.Value
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
type Point struct {
|
|
||||||
Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"`
|
|
||||||
Time *int64 `protobuf:"varint,2,req" json:"Time,omitempty"`
|
|
||||||
Fields []*Field `protobuf:"bytes,3,rep" json:"Fields,omitempty"`
|
|
||||||
Tags []*Tag `protobuf:"bytes,4,rep" json:"Tags,omitempty"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Point) Reset() { *m = Point{} }
|
|
||||||
func (m *Point) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*Point) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (m *Point) GetName() string {
|
|
||||||
if m != nil && m.Name != nil {
|
|
||||||
return *m.Name
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Point) GetTime() int64 {
|
|
||||||
if m != nil && m.Time != nil {
|
|
||||||
return *m.Time
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Point) GetFields() []*Field {
|
|
||||||
if m != nil {
|
|
||||||
return m.Fields
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Point) GetTags() []*Tag {
|
|
||||||
if m != nil {
|
|
||||||
return m.Tags
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type WriteShardResponse struct {
|
|
||||||
Code *int32 `protobuf:"varint,1,req" json:"Code,omitempty"`
|
|
||||||
Message *string `protobuf:"bytes,2,opt" json:"Message,omitempty"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *WriteShardResponse) Reset() { *m = WriteShardResponse{} }
|
|
||||||
func (m *WriteShardResponse) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*WriteShardResponse) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (m *WriteShardResponse) GetCode() int32 {
|
|
||||||
if m != nil && m.Code != nil {
|
|
||||||
return *m.Code
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *WriteShardResponse) GetMessage() string {
|
|
||||||
if m != nil && m.Message != nil {
|
|
||||||
return *m.Message
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
type MapShardRequest struct {
|
|
||||||
ShardID *uint64 `protobuf:"varint,1,req" json:"ShardID,omitempty"`
|
|
||||||
Query *string `protobuf:"bytes,2,req" json:"Query,omitempty"`
|
|
||||||
ChunkSize *int32 `protobuf:"varint,3,req" json:"ChunkSize,omitempty"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MapShardRequest) Reset() { *m = MapShardRequest{} }
|
|
||||||
func (m *MapShardRequest) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*MapShardRequest) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (m *MapShardRequest) GetShardID() uint64 {
|
|
||||||
if m != nil && m.ShardID != nil {
|
|
||||||
return *m.ShardID
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MapShardRequest) GetQuery() string {
|
|
||||||
if m != nil && m.Query != nil {
|
|
||||||
return *m.Query
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MapShardRequest) GetChunkSize() int32 {
|
|
||||||
if m != nil && m.ChunkSize != nil {
|
|
||||||
return *m.ChunkSize
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
type MapShardResponse struct {
|
|
||||||
Code *int32 `protobuf:"varint,1,req" json:"Code,omitempty"`
|
|
||||||
Message *string `protobuf:"bytes,2,opt" json:"Message,omitempty"`
|
|
||||||
Data []byte `protobuf:"bytes,3,opt" json:"Data,omitempty"`
|
|
||||||
TagSets []string `protobuf:"bytes,4,rep" json:"TagSets,omitempty"`
|
|
||||||
Fields []string `protobuf:"bytes,5,rep" json:"Fields,omitempty"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MapShardResponse) Reset() { *m = MapShardResponse{} }
|
|
||||||
func (m *MapShardResponse) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*MapShardResponse) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (m *MapShardResponse) GetCode() int32 {
|
|
||||||
if m != nil && m.Code != nil {
|
|
||||||
return *m.Code
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MapShardResponse) GetMessage() string {
|
|
||||||
if m != nil && m.Message != nil {
|
|
||||||
return *m.Message
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MapShardResponse) GetData() []byte {
|
|
||||||
if m != nil {
|
|
||||||
return m.Data
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MapShardResponse) GetTagSets() []string {
|
|
||||||
if m != nil {
|
|
||||||
return m.TagSets
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MapShardResponse) GetFields() []string {
|
|
||||||
if m != nil {
|
|
||||||
return m.Fields
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
}
|
|
49
Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.proto
generated
vendored
49
Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.proto
generated
vendored
|
@ -1,49 +0,0 @@
|
||||||
package internal;
|
|
||||||
|
|
||||||
message WriteShardRequest {
|
|
||||||
required uint64 ShardID = 1;
|
|
||||||
repeated Point Points = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message Field {
|
|
||||||
required string Name = 1;
|
|
||||||
oneof Value {
|
|
||||||
int32 Int32 = 2;
|
|
||||||
int64 Int64 = 3;
|
|
||||||
double Float64 = 4;
|
|
||||||
bool Bool = 5;
|
|
||||||
string String = 6;
|
|
||||||
bytes Bytes = 7;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
message Tag {
|
|
||||||
required string Key = 1;
|
|
||||||
required string Value = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message Point {
|
|
||||||
required string Name = 1;
|
|
||||||
required int64 Time = 2;
|
|
||||||
repeated Field Fields = 3;
|
|
||||||
repeated Tag Tags = 4;
|
|
||||||
}
|
|
||||||
|
|
||||||
message WriteShardResponse {
|
|
||||||
required int32 Code = 1;
|
|
||||||
optional string Message = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message MapShardRequest {
|
|
||||||
required uint64 ShardID = 1;
|
|
||||||
required string Query = 2;
|
|
||||||
required int32 ChunkSize = 3;
|
|
||||||
}
|
|
||||||
|
|
||||||
message MapShardResponse {
|
|
||||||
required int32 Code = 1;
|
|
||||||
optional string Message = 2;
|
|
||||||
optional bytes Data = 3;
|
|
||||||
repeated string TagSets = 4;
|
|
||||||
repeated string Fields = 5;
|
|
||||||
}
|
|
|
@ -1,314 +0,0 @@
|
||||||
package cluster
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb"
|
|
||||||
"github.com/influxdb/influxdb/meta"
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ConsistencyLevel represent a required replication criteria before a write can
|
|
||||||
// be returned as successful
|
|
||||||
type ConsistencyLevel int
|
|
||||||
|
|
||||||
const (
|
|
||||||
// ConsistencyLevelAny allows for hinted hand off, potentially no write happened yet
|
|
||||||
ConsistencyLevelAny ConsistencyLevel = iota
|
|
||||||
|
|
||||||
// ConsistencyLevelOne requires at least one data node acknowledged a write
|
|
||||||
ConsistencyLevelOne
|
|
||||||
|
|
||||||
// ConsistencyLevelOne requires a quorum of data nodes to acknowledge a write
|
|
||||||
ConsistencyLevelQuorum
|
|
||||||
|
|
||||||
// ConsistencyLevelAll requires all data nodes to acknowledge a write
|
|
||||||
ConsistencyLevelAll
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrTimeout is returned when a write times out.
|
|
||||||
ErrTimeout = errors.New("timeout")
|
|
||||||
|
|
||||||
// ErrPartialWrite is returned when a write partially succeeds but does
|
|
||||||
// not meet the requested consistency level.
|
|
||||||
ErrPartialWrite = errors.New("partial write")
|
|
||||||
|
|
||||||
// ErrWriteFailed is returned when no writes succeeded.
|
|
||||||
ErrWriteFailed = errors.New("write failed")
|
|
||||||
|
|
||||||
// ErrInvalidConsistencyLevel is returned when parsing the string version
|
|
||||||
// of a consistency level.
|
|
||||||
ErrInvalidConsistencyLevel = errors.New("invalid consistency level")
|
|
||||||
)
|
|
||||||
|
|
||||||
func ParseConsistencyLevel(level string) (ConsistencyLevel, error) {
|
|
||||||
switch strings.ToLower(level) {
|
|
||||||
case "any":
|
|
||||||
return ConsistencyLevelAny, nil
|
|
||||||
case "one":
|
|
||||||
return ConsistencyLevelOne, nil
|
|
||||||
case "quorum":
|
|
||||||
return ConsistencyLevelQuorum, nil
|
|
||||||
case "all":
|
|
||||||
return ConsistencyLevelAll, nil
|
|
||||||
default:
|
|
||||||
return 0, ErrInvalidConsistencyLevel
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// PointsWriter handles writes across multiple local and remote data nodes.
|
|
||||||
type PointsWriter struct {
|
|
||||||
mu sync.RWMutex
|
|
||||||
closing chan struct{}
|
|
||||||
WriteTimeout time.Duration
|
|
||||||
Logger *log.Logger
|
|
||||||
|
|
||||||
MetaStore interface {
|
|
||||||
NodeID() uint64
|
|
||||||
Database(name string) (di *meta.DatabaseInfo, err error)
|
|
||||||
RetentionPolicy(database, policy string) (*meta.RetentionPolicyInfo, error)
|
|
||||||
CreateShardGroupIfNotExists(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error)
|
|
||||||
ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo)
|
|
||||||
}
|
|
||||||
|
|
||||||
TSDBStore interface {
|
|
||||||
CreateShard(database, retentionPolicy string, shardID uint64) error
|
|
||||||
WriteToShard(shardID uint64, points []tsdb.Point) error
|
|
||||||
}
|
|
||||||
|
|
||||||
ShardWriter interface {
|
|
||||||
WriteShard(shardID, ownerID uint64, points []tsdb.Point) error
|
|
||||||
}
|
|
||||||
|
|
||||||
HintedHandoff interface {
|
|
||||||
WriteShard(shardID, ownerID uint64, points []tsdb.Point) error
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewPointsWriter returns a new instance of PointsWriter for a node.
|
|
||||||
func NewPointsWriter() *PointsWriter {
|
|
||||||
return &PointsWriter{
|
|
||||||
closing: make(chan struct{}),
|
|
||||||
WriteTimeout: DefaultWriteTimeout,
|
|
||||||
Logger: log.New(os.Stderr, "[write] ", log.LstdFlags),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ShardMapping contains a mapping of a shards to a points.
// Both maps are keyed by shard ID so a writer can look up, for each
// shard, the points destined for it and the shard's owner metadata.
type ShardMapping struct {
	Points map[uint64][]tsdb.Point    // The points associated with a shard ID
	Shards map[uint64]*meta.ShardInfo // The shards that have been mapped, keyed by shard ID
}
|
|
||||||
|
|
||||||
// NewShardMapping creates an empty ShardMapping
|
|
||||||
func NewShardMapping() *ShardMapping {
|
|
||||||
return &ShardMapping{
|
|
||||||
Points: map[uint64][]tsdb.Point{},
|
|
||||||
Shards: map[uint64]*meta.ShardInfo{},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// MapPoint maps a point to shard
|
|
||||||
func (s *ShardMapping) MapPoint(shardInfo *meta.ShardInfo, p tsdb.Point) {
|
|
||||||
points, ok := s.Points[shardInfo.ID]
|
|
||||||
if !ok {
|
|
||||||
s.Points[shardInfo.ID] = []tsdb.Point{p}
|
|
||||||
} else {
|
|
||||||
s.Points[shardInfo.ID] = append(points, p)
|
|
||||||
}
|
|
||||||
s.Shards[shardInfo.ID] = shardInfo
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *PointsWriter) Open() error {
|
|
||||||
w.mu.Lock()
|
|
||||||
defer w.mu.Unlock()
|
|
||||||
if w.closing == nil {
|
|
||||||
w.closing = make(chan struct{})
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *PointsWriter) Close() error {
|
|
||||||
w.mu.Lock()
|
|
||||||
defer w.mu.Unlock()
|
|
||||||
if w.closing != nil {
|
|
||||||
close(w.closing)
|
|
||||||
w.closing = nil
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MapShards maps the points contained in wp to a ShardMapping. If a point
|
|
||||||
// maps to a shard group or shard that does not currently exist, it will be
|
|
||||||
// created before returning the mapping.
|
|
||||||
func (w *PointsWriter) MapShards(wp *WritePointsRequest) (*ShardMapping, error) {
|
|
||||||
|
|
||||||
// holds the start time ranges for required shard groups
|
|
||||||
timeRanges := map[time.Time]*meta.ShardGroupInfo{}
|
|
||||||
|
|
||||||
rp, err := w.MetaStore.RetentionPolicy(wp.Database, wp.RetentionPolicy)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, p := range wp.Points {
|
|
||||||
timeRanges[p.Time().Truncate(rp.ShardGroupDuration)] = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// holds all the shard groups and shards that are required for writes
|
|
||||||
for t := range timeRanges {
|
|
||||||
sg, err := w.MetaStore.CreateShardGroupIfNotExists(wp.Database, wp.RetentionPolicy, t)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
timeRanges[t] = sg
|
|
||||||
}
|
|
||||||
|
|
||||||
mapping := NewShardMapping()
|
|
||||||
for _, p := range wp.Points {
|
|
||||||
sg := timeRanges[p.Time().Truncate(rp.ShardGroupDuration)]
|
|
||||||
sh := sg.ShardFor(p.HashID())
|
|
||||||
mapping.MapPoint(&sh, p)
|
|
||||||
}
|
|
||||||
return mapping, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WritePoints writes across multiple local and remote data nodes according the consistency level.
|
|
||||||
func (w *PointsWriter) WritePoints(p *WritePointsRequest) error {
|
|
||||||
if p.RetentionPolicy == "" {
|
|
||||||
db, err := w.MetaStore.Database(p.Database)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if db == nil {
|
|
||||||
return influxdb.ErrDatabaseNotFound(p.Database)
|
|
||||||
}
|
|
||||||
p.RetentionPolicy = db.DefaultRetentionPolicy
|
|
||||||
}
|
|
||||||
|
|
||||||
shardMappings, err := w.MapShards(p)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write each shard in it's own goroutine and return as soon
|
|
||||||
// as one fails.
|
|
||||||
ch := make(chan error, len(shardMappings.Points))
|
|
||||||
for shardID, points := range shardMappings.Points {
|
|
||||||
go func(shard *meta.ShardInfo, database, retentionPolicy string, points []tsdb.Point) {
|
|
||||||
ch <- w.writeToShard(shard, p.Database, p.RetentionPolicy, p.ConsistencyLevel, points)
|
|
||||||
}(shardMappings.Shards[shardID], p.Database, p.RetentionPolicy, points)
|
|
||||||
}
|
|
||||||
|
|
||||||
for range shardMappings.Points {
|
|
||||||
select {
|
|
||||||
case <-w.closing:
|
|
||||||
return ErrWriteFailed
|
|
||||||
case err := <-ch:
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeToShards writes points to a shard and ensures a write consistency level has been met. If the write
// partially succeeds, ErrPartialWrite is returned.
//
// One goroutine is launched per shard owner; each either writes locally
// (when the owner is this node) or remotely via ShardWriter, reporting its
// result on ch. The receive loop then counts successes against `required`.
func (w *PointsWriter) writeToShard(shard *meta.ShardInfo, database, retentionPolicy string,
	consistency ConsistencyLevel, points []tsdb.Point) error {
	// The required number of writes to achieve the requested consistency level
	required := len(shard.OwnerIDs)
	switch consistency {
	case ConsistencyLevelAny, ConsistencyLevelOne:
		required = 1
	case ConsistencyLevelQuorum:
		required = required/2 + 1
	}

	// response channel for each shard writer go routine
	ch := make(chan error, len(shard.OwnerIDs))

	for _, nodeID := range shard.OwnerIDs {
		go func(shardID, nodeID uint64, points []tsdb.Point) {
			// Local write path: this node owns the shard.
			if w.MetaStore.NodeID() == nodeID {
				err := w.TSDBStore.WriteToShard(shardID, points)
				// If we've written to shard that should exist on the current node, but the store has
				// not actually created this shard, tell it to create it and retry the write
				if err == tsdb.ErrShardNotFound {
					err = w.TSDBStore.CreateShard(database, retentionPolicy, shardID)
					if err != nil {
						ch <- err
						return
					}
					err = w.TSDBStore.WriteToShard(shardID, points)
				}
				ch <- err
				return
			}

			// Remote write path.
			err := w.ShardWriter.WriteShard(shardID, nodeID, points)
			if err != nil && tsdb.IsRetryable(err) {
				// The remote write failed so queue it via hinted handoff
				hherr := w.HintedHandoff.WriteShard(shardID, nodeID, points)

				// If the write consistency level is ANY, then a successful hinted handoff can
				// be considered a successful write so send nil to the response channel
				// otherwise, let the original error propagate to the response channel
				if hherr == nil && consistency == ConsistencyLevelAny {
					ch <- nil
					return
				}
			}
			ch <- err

		}(shard.ID, nodeID, points)
	}

	// Collect one response per owner, stopping early on Close or timeout.
	var wrote int
	timeout := time.After(w.WriteTimeout)
	var writeError error
	for _, nodeID := range shard.OwnerIDs {
		select {
		case <-w.closing:
			return ErrWriteFailed
		case <-timeout:
			// return timeout error to caller
			return ErrTimeout
		case err := <-ch:
			// If the write returned an error, continue to the next response
			if err != nil {
				// NOTE(review): responses arrive on ch in completion order,
				// so this nodeID (the range variable) may not be the node
				// that actually produced err — the log line can misattribute
				// the failing node.
				w.Logger.Printf("write failed for shard %d on node %d: %v", shard.ID, nodeID, err)

				// Keep track of the first error we see to return back to the client
				if writeError == nil {
					writeError = err
				}
				continue
			}

			wrote += 1
		}
	}

	// We wrote the required consistency level
	if wrote >= required {
		return nil
	}

	// Some but not all owners succeeded.
	if wrote > 0 {
		return ErrPartialWrite
	}

	// Nothing succeeded: surface the first error observed, if any.
	if writeError != nil {
		return fmt.Errorf("write failed: %v", writeError)
	}

	return ErrWriteFailed
}
|
|
436
Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer_test.go
generated
vendored
436
Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer_test.go
generated
vendored
|
@ -1,436 +0,0 @@
|
||||||
package cluster_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/cluster"
|
|
||||||
"github.com/influxdb/influxdb/meta"
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Ensures the points writer maps a single point to a single shard.
|
|
||||||
func TestPointsWriter_MapShards_One(t *testing.T) {
|
|
||||||
ms := MetaStore{}
|
|
||||||
rp := NewRetentionPolicy("myp", time.Hour, 3)
|
|
||||||
|
|
||||||
ms.NodeIDFn = func() uint64 { return 1 }
|
|
||||||
ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) {
|
|
||||||
return rp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) {
|
|
||||||
return &rp.ShardGroups[0], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
c := cluster.PointsWriter{MetaStore: ms}
|
|
||||||
pr := &cluster.WritePointsRequest{
|
|
||||||
Database: "mydb",
|
|
||||||
RetentionPolicy: "myrp",
|
|
||||||
ConsistencyLevel: cluster.ConsistencyLevelOne,
|
|
||||||
}
|
|
||||||
pr.AddPoint("cpu", 1.0, time.Now(), nil)
|
|
||||||
|
|
||||||
var (
|
|
||||||
shardMappings *cluster.ShardMapping
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
if shardMappings, err = c.MapShards(pr); err != nil {
|
|
||||||
t.Fatalf("unexpected an error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if exp := 1; len(shardMappings.Points) != exp {
|
|
||||||
t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Points), exp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensures the points writer maps a multiple points across shard group boundaries.
func TestPointsWriter_MapShards_Multiple(t *testing.T) {
	ms := MetaStore{}
	rp := NewRetentionPolicy("myp", time.Hour, 3)
	// Two extra groups so points spanning >1h land in different groups.
	AttachShardGroupInfo(rp, []uint64{1, 2, 3})
	AttachShardGroupInfo(rp, []uint64{1, 2, 3})

	ms.NodeIDFn = func() uint64 { return 1 }
	ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) {
		return rp, nil
	}

	// Route each timestamp to the group whose [StartTime, EndTime) covers it.
	ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) {
		for i, sg := range rp.ShardGroups {
			if timestamp.Equal(sg.StartTime) || timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime) {
				return &rp.ShardGroups[i], nil
			}
		}
		panic("should not get here")
	}

	c := cluster.PointsWriter{MetaStore: ms}
	pr := &cluster.WritePointsRequest{
		Database:         "mydb",
		RetentionPolicy:  "myrp",
		ConsistencyLevel: cluster.ConsistencyLevelOne,
	}

	// Three points that range over the shardGroup duration (1h) and should map to two
	// distinct shards
	pr.AddPoint("cpu", 1.0, time.Unix(0, 0), nil)
	pr.AddPoint("cpu", 2.0, time.Unix(0, 0).Add(time.Hour), nil)
	pr.AddPoint("cpu", 3.0, time.Unix(0, 0).Add(time.Hour+time.Second), nil)

	var (
		shardMappings *cluster.ShardMapping
		err           error
	)
	if shardMappings, err = c.MapShards(pr); err != nil {
		t.Fatalf("unexpected an error: %v", err)
	}

	if exp := 2; len(shardMappings.Points) != exp {
		t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Points), exp)
	}

	// Map iteration order is random, so identify each shard by point count.
	for _, points := range shardMappings.Points {
		// First shard should have 1 point w/ first point added
		if len(points) == 1 && points[0].Time() != pr.Points[0].Time() {
			t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[0].Time(), pr.Points[0].Time())
		}

		// Second shard should have the last two points added
		if len(points) == 2 && points[0].Time() != pr.Points[1].Time() {
			t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[0].Time(), pr.Points[1].Time())
		}

		if len(points) == 2 && points[1].Time() != pr.Points[2].Time() {
			t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[1].Time(), pr.Points[2].Time())
		}
	}
}
|
|
||||||
|
|
||||||
// TestPointsWriter_WritePoints is a table-driven test of the full write
// path: each case supplies per-node shard-write results (err[i] is the
// response for node ID i+1) and the error WritePoints should surface for
// the given consistency level.
func TestPointsWriter_WritePoints(t *testing.T) {
	tests := []struct {
		name            string
		database        string
		retentionPolicy string
		consistency     cluster.ConsistencyLevel

		// the responses returned by each shard write call.  node ID 1 = pos 0
		err []error

		expErr error
	}{
		// Consistency one
		{
			name:            "write one success",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelOne,
			err:             []error{nil, nil, nil},
			expErr:          nil,
		},
		{
			name:            "write one error",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelOne,
			err:             []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")},
			expErr:          fmt.Errorf("write failed: a failure"),
		},

		// Consistency any
		{
			name:            "write any success",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelAny,
			err:             []error{fmt.Errorf("a failure"), nil, fmt.Errorf("a failure")},
			expErr:          nil,
		},
		// Consistency all
		{
			name:            "write all success",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelAll,
			err:             []error{nil, nil, nil},
			expErr:          nil,
		},
		{
			name:            "write all, 2/3, partial write",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelAll,
			err:             []error{nil, fmt.Errorf("a failure"), nil},
			expErr:          cluster.ErrPartialWrite,
		},
		{
			name:            "write all, 1/3 (failure)",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelAll,
			err:             []error{nil, fmt.Errorf("a failure"), fmt.Errorf("a failure")},
			expErr:          cluster.ErrPartialWrite,
		},

		// Consistency quorum
		{
			name:            "write quorum, 1/3 failure",
			consistency:     cluster.ConsistencyLevelQuorum,
			database:        "mydb",
			retentionPolicy: "myrp",
			err:             []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), nil},
			expErr:          cluster.ErrPartialWrite,
		},
		{
			name:            "write quorum, 2/3 success",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelQuorum,
			err:             []error{nil, nil, fmt.Errorf("a failure")},
			expErr:          nil,
		},
		{
			name:            "write quorum, 3/3 success",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelQuorum,
			err:             []error{nil, nil, nil},
			expErr:          nil,
		},

		// Error write error
		{
			name:            "no writes succeed",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelOne,
			err:             []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")},
			expErr:          fmt.Errorf("write failed: a failure"),
		},

		// Hinted handoff w/ ANY
		{
			name:            "hinted handoff write succeed",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelAny,
			err:             []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")},
			expErr:          nil,
		},

		// Write to non-existant database
		{
			name:            "write to non-existant database",
			database:        "doesnt_exist",
			retentionPolicy: "",
			consistency:     cluster.ConsistencyLevelAny,
			err:             []error{nil, nil, nil},
			expErr:          fmt.Errorf("database not found: doesnt_exist"),
		},
	}

	for _, test := range tests {

		pr := &cluster.WritePointsRequest{
			Database:         test.database,
			RetentionPolicy:  test.retentionPolicy,
			ConsistencyLevel: test.consistency,
		}

		// Three points that range over the shardGroup duration (1h) and should map to two
		// distinct shards
		pr.AddPoint("cpu", 1.0, time.Unix(0, 0), nil)
		pr.AddPoint("cpu", 2.0, time.Unix(0, 0).Add(time.Hour), nil)
		pr.AddPoint("cpu", 3.0, time.Unix(0, 0).Add(time.Hour+time.Second), nil)

		// copy to prevent data race
		theTest := test
		// NOTE(review): sm is built but never handed to the writer — it
		// appears vestigial here (mapping is recomputed inside WritePoints).
		sm := cluster.NewShardMapping()
		sm.MapPoint(
			&meta.ShardInfo{ID: uint64(1), OwnerIDs: []uint64{uint64(1), uint64(2), uint64(3)}},
			pr.Points[0])
		sm.MapPoint(
			&meta.ShardInfo{ID: uint64(2), OwnerIDs: []uint64{uint64(1), uint64(2), uint64(3)}},
			pr.Points[1])
		sm.MapPoint(
			&meta.ShardInfo{ID: uint64(2), OwnerIDs: []uint64{uint64(1), uint64(2), uint64(3)}},
			pr.Points[2])

		// Local cluster.Node ShardWriter
		// lock on the write increment since these functions get called in parallel
		var mu sync.Mutex
		sw := &fakeShardWriter{
			ShardWriteFn: func(shardID, nodeID uint64, points []tsdb.Point) error {
				mu.Lock()
				defer mu.Unlock()
				// nodeID 1 maps to err[0], etc.
				return theTest.err[int(nodeID)-1]
			},
		}

		store := &fakeStore{
			WriteFn: func(shardID uint64, points []tsdb.Point) error {
				mu.Lock()
				defer mu.Unlock()
				// The local node is node 1, so local writes use err[0].
				return theTest.err[0]
			},
		}

		// Hinted handoff always succeeds, exercising the ANY-level path.
		hh := &fakeShardWriter{
			ShardWriteFn: func(shardID, nodeID uint64, points []tsdb.Point) error {
				return nil
			},
		}

		ms := NewMetaStore()
		// Returning (nil, nil) makes WritePoints report "database not found"
		// whenever the request must resolve a default retention policy.
		ms.DatabaseFn = func(database string) (*meta.DatabaseInfo, error) {
			return nil, nil
		}
		ms.NodeIDFn = func() uint64 { return 1 }
		c := cluster.NewPointsWriter()
		c.MetaStore = ms
		c.ShardWriter = sw
		c.TSDBStore = store
		c.HintedHandoff = hh

		err := c.WritePoints(pr)
		if err == nil && test.expErr != nil {
			t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr)
		}

		if err != nil && test.expErr == nil {
			t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr)
		}
		if err != nil && test.expErr != nil && err.Error() != test.expErr.Error() {
			t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr)
		}
	}
}
|
|
||||||
|
|
||||||
// shardID is the global counter that nextShardID advances atomically to
// hand out unique shard IDs for test fixtures.
var shardID uint64
|
|
||||||
|
|
||||||
// fakeShardWriter stubs the ShardWriter/HintedHandoff interfaces; tests
// supply the behavior through ShardWriteFn.
type fakeShardWriter struct {
	ShardWriteFn func(shardID, nodeID uint64, points []tsdb.Point) error
}

// WriteShard delegates to the injected ShardWriteFn.
func (f *fakeShardWriter) WriteShard(shardID, nodeID uint64, points []tsdb.Point) error {
	return f.ShardWriteFn(shardID, nodeID, points)
}
|
|
||||||
|
|
||||||
// fakeStore stubs the TSDBStore interface; tests supply the behavior
// through WriteFn and CreateShardfn.
type fakeStore struct {
	WriteFn       func(shardID uint64, points []tsdb.Point) error
	CreateShardfn func(database, retentionPolicy string, shardID uint64) error
}

// WriteToShard delegates to the injected WriteFn.
func (f *fakeStore) WriteToShard(shardID uint64, points []tsdb.Point) error {
	return f.WriteFn(shardID, points)
}

// CreateShard delegates to the injected CreateShardfn.
func (f *fakeStore) CreateShard(database, retentionPolicy string, shardID uint64) error {
	return f.CreateShardfn(database, retentionPolicy, shardID)
}
|
|
||||||
|
|
||||||
// NewMetaStore returns a fake MetaStore pre-wired with a three-group
// retention policy and lookup functions that resolve a timestamp to the
// shard group covering it.
func NewMetaStore() *MetaStore {
	ms := &MetaStore{}
	rp := NewRetentionPolicy("myp", time.Hour, 3)
	AttachShardGroupInfo(rp, []uint64{1, 2, 3})
	AttachShardGroupInfo(rp, []uint64{1, 2, 3})

	// Every database/policy name resolves to the same fixture policy.
	ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) {
		return rp, nil
	}

	// Return the existing group whose [StartTime, EndTime) covers timestamp.
	ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) {
		for i, sg := range rp.ShardGroups {
			if timestamp.Equal(sg.StartTime) || timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime) {
				return &rp.ShardGroups[i], nil
			}
		}
		panic("should not get here")
	}
	return ms
}
|
|
||||||
|
|
||||||
// MetaStore is a function-stubbed fake implementing the PointsWriter
// MetaStore interface; tests assign only the Fn fields they need.
type MetaStore struct {
	NodeIDFn                      func() uint64
	RetentionPolicyFn             func(database, name string) (*meta.RetentionPolicyInfo, error)
	CreateShardGroupIfNotExistsFn func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error)
	DatabaseFn                    func(database string) (*meta.DatabaseInfo, error)
	ShardOwnerFn                  func(shardID uint64) (string, string, *meta.ShardGroupInfo)
}

// NodeID delegates to NodeIDFn.
func (m MetaStore) NodeID() uint64 { return m.NodeIDFn() }

// RetentionPolicy delegates to RetentionPolicyFn.
func (m MetaStore) RetentionPolicy(database, name string) (*meta.RetentionPolicyInfo, error) {
	return m.RetentionPolicyFn(database, name)
}

// CreateShardGroupIfNotExists delegates to CreateShardGroupIfNotExistsFn.
func (m MetaStore) CreateShardGroupIfNotExists(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) {
	return m.CreateShardGroupIfNotExistsFn(database, policy, timestamp)
}

// Database delegates to DatabaseFn.
func (m MetaStore) Database(database string) (*meta.DatabaseInfo, error) {
	return m.DatabaseFn(database)
}

// ShardOwner delegates to ShardOwnerFn.
func (m MetaStore) ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) {
	return m.ShardOwnerFn(shardID)
}
|
|
||||||
|
|
||||||
func NewRetentionPolicy(name string, duration time.Duration, nodeCount int) *meta.RetentionPolicyInfo {
|
|
||||||
shards := []meta.ShardInfo{}
|
|
||||||
ownerIDs := []uint64{}
|
|
||||||
for i := 1; i <= nodeCount; i++ {
|
|
||||||
ownerIDs = append(ownerIDs, uint64(i))
|
|
||||||
}
|
|
||||||
|
|
||||||
// each node is fully replicated with each other
|
|
||||||
shards = append(shards, meta.ShardInfo{
|
|
||||||
ID: nextShardID(),
|
|
||||||
OwnerIDs: ownerIDs,
|
|
||||||
})
|
|
||||||
|
|
||||||
rp := &meta.RetentionPolicyInfo{
|
|
||||||
Name: "myrp",
|
|
||||||
ReplicaN: nodeCount,
|
|
||||||
Duration: duration,
|
|
||||||
ShardGroupDuration: duration,
|
|
||||||
ShardGroups: []meta.ShardGroupInfo{
|
|
||||||
meta.ShardGroupInfo{
|
|
||||||
ID: nextShardID(),
|
|
||||||
StartTime: time.Unix(0, 0),
|
|
||||||
EndTime: time.Unix(0, 0).Add(duration).Add(-1),
|
|
||||||
Shards: shards,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
return rp
|
|
||||||
}
|
|
||||||
|
|
||||||
func AttachShardGroupInfo(rp *meta.RetentionPolicyInfo, ownerIDs []uint64) {
|
|
||||||
var startTime, endTime time.Time
|
|
||||||
if len(rp.ShardGroups) == 0 {
|
|
||||||
startTime = time.Unix(0, 0)
|
|
||||||
} else {
|
|
||||||
startTime = rp.ShardGroups[len(rp.ShardGroups)-1].StartTime.Add(rp.ShardGroupDuration)
|
|
||||||
}
|
|
||||||
endTime = startTime.Add(rp.ShardGroupDuration).Add(-1)
|
|
||||||
|
|
||||||
sh := meta.ShardGroupInfo{
|
|
||||||
ID: uint64(len(rp.ShardGroups) + 1),
|
|
||||||
StartTime: startTime,
|
|
||||||
EndTime: endTime,
|
|
||||||
Shards: []meta.ShardInfo{
|
|
||||||
meta.ShardInfo{
|
|
||||||
ID: nextShardID(),
|
|
||||||
OwnerIDs: ownerIDs,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
rp.ShardGroups = append(rp.ShardGroups, sh)
|
|
||||||
}
|
|
||||||
|
|
||||||
// nextShardID returns a process-unique shard ID by atomically advancing
// the package-level counter (safe for concurrent test fixtures).
func nextShardID() uint64 {
	return atomic.AddUint64(&shardID, 1)
}
|
|
|
@ -1,229 +0,0 @@
|
||||||
package cluster
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/gogo/protobuf/proto"
|
|
||||||
"github.com/influxdb/influxdb/cluster/internal"
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
//go:generate protoc --gogo_out=. internal/data.proto
|
|
||||||
|
|
||||||
// MapShardRequest represents the request to map a remote shard for a query.
type MapShardRequest struct {
	pb internal.MapShardRequest // protobuf wire representation
}
|
|
||||||
|
|
||||||
// Getters read through to the embedded protobuf message.
func (m *MapShardRequest) ShardID() uint64  { return m.pb.GetShardID() }
func (m *MapShardRequest) Query() string    { return m.pb.GetQuery() }
func (m *MapShardRequest) ChunkSize() int32 { return m.pb.GetChunkSize() }

// Setters store into the protobuf message; optional proto2 fields are
// pointers, hence the address-of.
func (m *MapShardRequest) SetShardID(id uint64)         { m.pb.ShardID = &id }
func (m *MapShardRequest) SetQuery(query string)        { m.pb.Query = &query }
func (m *MapShardRequest) SetChunkSize(chunkSize int32) { m.pb.ChunkSize = &chunkSize }
|
|
||||||
|
|
||||||
// MarshalBinary encodes the object to a binary format (protobuf).
func (m *MapShardRequest) MarshalBinary() ([]byte, error) {
	return proto.Marshal(&m.pb)
}
|
|
||||||
|
|
||||||
// UnmarshalBinary populates MapShardRequest from a binary format.
|
|
||||||
func (m *MapShardRequest) UnmarshalBinary(buf []byte) error {
|
|
||||||
if err := proto.Unmarshal(buf, &m.pb); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MapShardResponse represents the response returned from a remote MapShardRequest call
type MapShardResponse struct {
	pb internal.MapShardResponse // protobuf wire representation
}
|
|
||||||
|
|
||||||
func NewMapShardResponse(code int, message string) *MapShardResponse {
|
|
||||||
m := &MapShardResponse{}
|
|
||||||
m.SetCode(code)
|
|
||||||
m.SetMessage(message)
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
// Getters read through to the embedded protobuf message.
func (r *MapShardResponse) Code() int         { return int(r.pb.GetCode()) }
func (r *MapShardResponse) Message() string   { return r.pb.GetMessage() }
func (r *MapShardResponse) TagSets() []string { return r.pb.GetTagSets() }
func (r *MapShardResponse) Fields() []string  { return r.pb.GetFields() }
func (r *MapShardResponse) Data() []byte      { return r.pb.GetData() }

// Setters store into the protobuf message.
func (r *MapShardResponse) SetCode(code int)            { r.pb.Code = proto.Int32(int32(code)) }
func (r *MapShardResponse) SetMessage(message string)   { r.pb.Message = &message }
func (r *MapShardResponse) SetTagSets(tagsets []string) { r.pb.TagSets = tagsets }
func (r *MapShardResponse) SetFields(fields []string)   { r.pb.Fields = fields }
func (r *MapShardResponse) SetData(data []byte)         { r.pb.Data = data }
|
|
||||||
|
|
||||||
// MarshalBinary encodes the object to a binary format (protobuf).
func (r *MapShardResponse) MarshalBinary() ([]byte, error) {
	return proto.Marshal(&r.pb)
}
|
|
||||||
|
|
||||||
// UnmarshalBinary populates WritePointRequest from a binary format.
|
|
||||||
func (r *MapShardResponse) UnmarshalBinary(buf []byte) error {
|
|
||||||
if err := proto.Unmarshal(buf, &r.pb); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WritePointsRequest represents a request to write point data to the cluster
type WritePointsRequest struct {
	Database         string
	RetentionPolicy  string // empty means "use the database's default policy"
	ConsistencyLevel ConsistencyLevel
	Points           []tsdb.Point
}
|
|
||||||
|
|
||||||
// AddPoint adds a point to the WritePointRequest with field name 'value'
|
|
||||||
func (w *WritePointsRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) {
|
|
||||||
w.Points = append(w.Points, tsdb.NewPoint(
|
|
||||||
name, tags, map[string]interface{}{"value": value}, timestamp,
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteShardRequest represents the a request to write a slice of points to a shard
type WriteShardRequest struct {
	pb internal.WriteShardRequest // protobuf wire representation
}

// WriteShardResponse represents the response returned from a remote WriteShardRequest call
type WriteShardResponse struct {
	pb internal.WriteShardResponse // protobuf wire representation
}
|
|
||||||
|
|
||||||
// SetShardID/ShardID access the target shard ID on the protobuf message.
func (w *WriteShardRequest) SetShardID(id uint64) { w.pb.ShardID = &id }
func (w *WriteShardRequest) ShardID() uint64      { return w.pb.GetShardID() }

// Points decodes the request's protobuf points back into tsdb.Points.
func (w *WriteShardRequest) Points() []tsdb.Point { return w.unmarshalPoints() }
|
|
||||||
|
|
||||||
func (w *WriteShardRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) {
|
|
||||||
w.AddPoints([]tsdb.Point{tsdb.NewPoint(
|
|
||||||
name, tags, map[string]interface{}{"value": value}, timestamp,
|
|
||||||
)})
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddPoints marshals points into their protobuf form and appends them to
// the request's point list.
func (w *WriteShardRequest) AddPoints(points []tsdb.Point) {
	w.pb.Points = append(w.pb.Points, w.marshalPoints(points)...)
}
|
|
||||||
|
|
||||||
// MarshalBinary encodes the object to a binary format (protobuf).
func (w *WriteShardRequest) MarshalBinary() ([]byte, error) {
	return proto.Marshal(&w.pb)
}
|
|
||||||
|
|
||||||
// marshalPoints converts tsdb.Points into their protobuf representation,
// mapping each field value onto the matching typed slot of internal.Field.
func (w *WriteShardRequest) marshalPoints(points []tsdb.Point) []*internal.Point {
	pts := make([]*internal.Point, len(points))
	for i, p := range points {
		fields := []*internal.Field{}
		for k, v := range p.Fields() {
			// Copy the key so &name below doesn't alias the loop variable.
			name := k
			f := &internal.Field{
				Name: &name,
			}
			// Store the value in the slot matching its dynamic type; an
			// unlisted type leaves all slots unset.
			switch t := v.(type) {
			case int:
				f.Int64 = proto.Int64(int64(t))
			case int32:
				f.Int32 = proto.Int32(t)
			case int64:
				f.Int64 = proto.Int64(t)
			case float64:
				f.Float64 = proto.Float64(t)
			case bool:
				f.Bool = proto.Bool(t)
			case string:
				f.String_ = proto.String(t)
			case []byte:
				f.Bytes = t
			}
			fields = append(fields, f)
		}

		tags := []*internal.Tag{}
		for k, v := range p.Tags() {
			// Copy key/value so the pointers below don't alias loop variables.
			key := k
			value := v
			tags = append(tags, &internal.Tag{
				Key:   &key,
				Value: &value,
			})
		}
		name := p.Name()
		pts[i] = &internal.Point{
			Name:   &name,
			Time:   proto.Int64(p.Time().UnixNano()),
			Fields: fields,
			Tags:   tags,
		}

	}
	return pts
}
|
|
||||||
|
|
||||||
// UnmarshalBinary populates WritePointRequest from a binary format.
|
|
||||||
func (w *WriteShardRequest) UnmarshalBinary(buf []byte) error {
|
|
||||||
if err := proto.Unmarshal(buf, &w.pb); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// unmarshalPoints converts the protobuf points carried in the request
// back into tsdb points.
func (w *WriteShardRequest) unmarshalPoints() []tsdb.Point {
	points := make([]tsdb.Point, len(w.pb.GetPoints()))
	for i, p := range w.pb.GetPoints() {
		// Start with a point carrying only name and time; tags and
		// fields are filled in below.
		pt := tsdb.NewPoint(
			p.GetName(), map[string]string{},
			map[string]interface{}{}, time.Unix(0, p.GetTime()))

		for _, f := range p.GetFields() {
			n := f.GetName()
			// marshalPoints populated at most one optional slot; probe
			// them in order, with Bytes as the fallback.
			if f.Int32 != nil {
				pt.AddField(n, f.GetInt32())
			} else if f.Int64 != nil {
				pt.AddField(n, f.GetInt64())
			} else if f.Float64 != nil {
				pt.AddField(n, f.GetFloat64())
			} else if f.Bool != nil {
				pt.AddField(n, f.GetBool())
			} else if f.String_ != nil {
				pt.AddField(n, f.GetString_())
			} else {
				pt.AddField(n, f.GetBytes())
			}
		}

		tags := tsdb.Tags{}
		for _, t := range p.GetTags() {
			tags[t.GetKey()] = t.GetValue()
		}
		pt.SetTags(tags)
		points[i] = pt
	}
	return points
}
|
|
||||||
|
|
||||||
// SetCode sets the response status code.
func (w *WriteShardResponse) SetCode(code int) { w.pb.Code = proto.Int32(int32(code)) }

// SetMessage sets the response message.
func (w *WriteShardResponse) SetMessage(message string) { w.pb.Message = &message }

// Code returns the response status code, or 0 if unset.
func (w *WriteShardResponse) Code() int { return int(w.pb.GetCode()) }

// Message returns the response message, or "" if unset.
func (w *WriteShardResponse) Message() string { return w.pb.GetMessage() }
|
|
||||||
|
|
||||||
// MarshalBinary encodes the object to a binary format.
func (w *WriteShardResponse) MarshalBinary() ([]byte, error) {
	// Delegate to the generated protobuf marshaller.
	return proto.Marshal(&w.pb)
}
|
|
||||||
|
|
||||||
// UnmarshalBinary populates WritePointRequest from a binary format.
|
|
||||||
func (w *WriteShardResponse) UnmarshalBinary(buf []byte) error {
|
|
||||||
if err := proto.Unmarshal(buf, &w.pb); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
|
@ -1,110 +0,0 @@
|
||||||
package cluster
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestWriteShardRequestBinary ensures a WriteShardRequest survives a
// marshal/unmarshal round trip with its shard ID and points intact.
func TestWriteShardRequestBinary(t *testing.T) {
	sr := &WriteShardRequest{}

	sr.SetShardID(uint64(1))
	if exp := uint64(1); sr.ShardID() != exp {
		t.Fatalf("ShardID mismatch: got %v, exp %v", sr.ShardID(), exp)
	}

	// Points with and without tags, at distinct timestamps.
	sr.AddPoint("cpu", 1.0, time.Unix(0, 0), map[string]string{"host": "serverA"})
	sr.AddPoint("cpu", 2.0, time.Unix(0, 0).Add(time.Hour), nil)
	sr.AddPoint("cpu_load", 3.0, time.Unix(0, 0).Add(time.Hour+time.Second), nil)

	b, err := sr.MarshalBinary()
	if err != nil {
		t.Fatalf("WritePointsRequest.MarshalBinary() failed: %v", err)
	}
	if len(b) == 0 {
		t.Fatalf("WritePointsRequest.MarshalBinary() returned 0 bytes")
	}

	got := &WriteShardRequest{}
	if err := got.UnmarshalBinary(b); err != nil {
		t.Fatalf("WritePointsRequest.UnmarshalMarshalBinary() failed: %v", err)
	}

	if got.ShardID() != sr.ShardID() {
		t.Errorf("ShardID mismatch: got %v, exp %v", got.ShardID(), sr.ShardID())
	}

	if len(got.Points()) != len(sr.Points()) {
		t.Errorf("Points count mismatch: got %v, exp %v", len(got.Points()), len(sr.Points()))
	}

	// Compare each round-tripped point element by element.
	srPoints := sr.Points()
	gotPoints := got.Points()
	for i, p := range srPoints {
		g := gotPoints[i]

		if g.Name() != p.Name() {
			t.Errorf("Point %d name mismatch: got %v, exp %v", i, g.Name(), p.Name())
		}

		if !g.Time().Equal(p.Time()) {
			t.Errorf("Point %d time mismatch: got %v, exp %v", i, g.Time(), p.Time())
		}

		if g.HashID() != p.HashID() {
			t.Errorf("Point #%d HashID() mismatch: got %v, exp %v", i, g.HashID(), p.HashID())
		}

		for k, v := range p.Tags() {
			if g.Tags()[k] != v {
				t.Errorf("Point #%d tag mismatch: got %v, exp %v", i, k, v)
			}
		}

		if len(p.Fields()) != len(g.Fields()) {
			t.Errorf("Point %d field count mismatch: got %v, exp %v", i, len(g.Fields()), len(p.Fields()))
		}

		for j, f := range p.Fields() {
			if g.Fields()[j] != f {
				t.Errorf("Point %d field mismatch: got %v, exp %v", i, g.Fields()[j], f)
			}
		}
	}
}
|
|
||||||
|
|
||||||
func TestWriteShardResponseBinary(t *testing.T) {
|
|
||||||
sr := &WriteShardResponse{}
|
|
||||||
sr.SetCode(10)
|
|
||||||
sr.SetMessage("foo")
|
|
||||||
b, err := sr.MarshalBinary()
|
|
||||||
|
|
||||||
if exp := 10; sr.Code() != exp {
|
|
||||||
t.Fatalf("Code mismatch: got %v, exp %v", sr.Code(), exp)
|
|
||||||
}
|
|
||||||
|
|
||||||
if exp := "foo"; sr.Message() != exp {
|
|
||||||
t.Fatalf("Message mismatch: got %v, exp %v", sr.Message(), exp)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("WritePointsResponse.MarshalBinary() failed: %v", err)
|
|
||||||
}
|
|
||||||
if len(b) == 0 {
|
|
||||||
t.Fatalf("WritePointsResponse.MarshalBinary() returned 0 bytes")
|
|
||||||
}
|
|
||||||
|
|
||||||
got := &WriteShardResponse{}
|
|
||||||
if err := got.UnmarshalBinary(b); err != nil {
|
|
||||||
t.Fatalf("WritePointsResponse.UnmarshalMarshalBinary() failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if got.Code() != sr.Code() {
|
|
||||||
t.Errorf("Code mismatch: got %v, exp %v", got.Code(), sr.Code())
|
|
||||||
}
|
|
||||||
|
|
||||||
if got.Message() != sr.Message() {
|
|
||||||
t.Errorf("Message mismatch: got %v, exp %v", got.Message(), sr.Message())
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
|
@ -1,338 +0,0 @@
|
||||||
package cluster
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"net"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/meta"
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MaxMessageSize defines how large a message can be before we reject it.
const MaxMessageSize = 1024 * 1024 * 1024 // 1GB

// MuxHeader is the header byte used in the TCP mux.
const MuxHeader = 2
|
|
||||||
|
|
||||||
// Service processes data received over raw TCP connections.
type Service struct {
	mu sync.RWMutex

	wg      sync.WaitGroup
	closing chan struct{} // closed in Close to stop serve and handlers

	// Listener supplies the raw TCP connections accepted in serve.
	Listener net.Listener

	// MetaStore resolves which database/retention policy owns a shard.
	MetaStore interface {
		ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo)
	}

	// TSDBStore is the local storage engine used to create and write
	// shards and to build query mappers.
	TSDBStore interface {
		CreateShard(database, policy string, shardID uint64) error
		WriteToShard(shardID uint64, points []tsdb.Point) error
		CreateMapper(shardID uint64, query string, chunkSize int) (tsdb.Mapper, error)
	}

	Logger *log.Logger
}
|
|
||||||
|
|
||||||
// NewService returns a new instance of Service.
|
|
||||||
func NewService(c Config) *Service {
|
|
||||||
return &Service{
|
|
||||||
closing: make(chan struct{}),
|
|
||||||
Logger: log.New(os.Stderr, "[tcp] ", log.LstdFlags),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open opens the network listener and begins serving requests.
func (s *Service) Open() error {

	s.Logger.Println("Starting cluster service")
	// Begin serving connections in the background; Close stops it.
	s.wg.Add(1)
	go s.serve()

	return nil
}
|
|
||||||
|
|
||||||
// SetLogger sets the internal logger to the logger passed in.
func (s *Service) SetLogger(l *log.Logger) {
	s.Logger = l
}
|
|
||||||
|
|
||||||
// serve accepts connections from the listener and handles them, each
// on its own goroutine, until the service is closed.
func (s *Service) serve() {
	defer s.wg.Done()

	for {
		// Check if the service is shutting down.
		select {
		case <-s.closing:
			return
		default:
		}

		// Accept the next connection.
		conn, err := s.Listener.Accept()
		if err != nil {
			// NOTE(review): matching on the error string is fragile —
			// confirm the listener in use actually produces
			// "connection closed" on shutdown.
			if strings.Contains(err.Error(), "connection closed") {
				s.Logger.Printf("cluster service accept error: %s", err)
				return
			}
			s.Logger.Printf("accept error: %s", err)
			continue
		}

		// Delegate connection handling to a separate goroutine.
		s.wg.Add(1)
		go func() {
			defer s.wg.Done()
			s.handleConn(conn)
		}()
	}
}
|
|
||||||
|
|
||||||
// Close shuts down the listener and waits for all connections to finish.
|
|
||||||
func (s *Service) Close() error {
|
|
||||||
if s.Listener != nil {
|
|
||||||
s.Listener.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Shut down all handlers.
|
|
||||||
close(s.closing)
|
|
||||||
// s.wg.Wait() // FIXME(benbjohnson)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleConn services an individual TCP connection, processing TLV
// messages until the peer disconnects or the service closes.
func (s *Service) handleConn(conn net.Conn) {
	// Ensure connection is closed when service is closed.
	closing := make(chan struct{})
	defer close(closing)
	go func() {
		// Fires on either normal return (closing) or service shutdown.
		select {
		case <-closing:
		case <-s.closing:
		}
		conn.Close()
	}()

	s.Logger.Printf("accept remote write connection from %v\n", conn.RemoteAddr())
	defer func() {
		s.Logger.Printf("close remote write connection from %v\n", conn.RemoteAddr())
	}()
	for {
		// Read type-length-value.
		typ, buf, err := ReadTLV(conn)
		if err != nil {
			// EOF is the normal end of a connection; log anything else.
			if strings.HasSuffix(err.Error(), "EOF") {
				return
			}
			s.Logger.Printf("unable to read type-length-value %s", err)
			return
		}

		// Delegate message processing by type.
		switch typ {
		case writeShardRequestMessage:
			err := s.processWriteShardRequest(buf)
			if err != nil {
				s.Logger.Printf("process write shard error: %s", err)
			}
			// A response is written whether or not the write succeeded.
			s.writeShardResponse(conn, err)
		case mapShardRequestMessage:
			err := s.processMapShardRequest(conn, buf)
			if err != nil {
				s.Logger.Printf("process map shard error: %s", err)
				if err := writeMapShardResponseMessage(conn, NewMapShardResponse(1, err.Error())); err != nil {
					s.Logger.Printf("process map shard error writing response: %s", err.Error())
				}
			}
		default:
			s.Logger.Printf("cluster service message type not found: %d", typ)
		}
	}
}
|
|
||||||
|
|
||||||
// processWriteShardRequest decodes a write request and applies it to
// the local TSDB store, creating the shard first when it does not yet
// exist locally.
func (s *Service) processWriteShardRequest(buf []byte) error {
	// Build request
	var req WriteShardRequest
	if err := req.UnmarshalBinary(buf); err != nil {
		return err
	}

	err := s.TSDBStore.WriteToShard(req.ShardID(), req.Points())

	// We may have received a write for a shard that we don't have locally because the
	// sending node may have just created the shard (via the metastore) and the write
	// arrived before the local store could create the shard. In this case, we need
	// to check the metastore to determine what database and retention policy this
	// shard should reside within.
	if err == tsdb.ErrShardNotFound {

		// Query the metastore for the owner of this shard
		database, retentionPolicy, sgi := s.MetaStore.ShardOwner(req.ShardID())
		if sgi == nil {
			// If we can't find it, then we need to drop this request
			// as it is no longer valid. This could happen if writes were queued via
			// hinted handoff and delivered after a shard group was deleted.
			s.Logger.Printf("drop write request: shard=%d", req.ShardID())
			return nil
		}

		err = s.TSDBStore.CreateShard(database, retentionPolicy, req.ShardID())
		if err != nil {
			return err
		}
		// Retry the write now that the shard exists locally.
		return s.TSDBStore.WriteToShard(req.ShardID(), req.Points())
	}

	if err != nil {
		return fmt.Errorf("write shard %d: %s", req.ShardID(), err)
	}

	return nil
}
|
|
||||||
|
|
||||||
func (s *Service) writeShardResponse(w io.Writer, e error) {
|
|
||||||
// Build response.
|
|
||||||
var resp WriteShardResponse
|
|
||||||
if e != nil {
|
|
||||||
resp.SetCode(1)
|
|
||||||
resp.SetMessage(e.Error())
|
|
||||||
} else {
|
|
||||||
resp.SetCode(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Marshal response to binary.
|
|
||||||
buf, err := resp.MarshalBinary()
|
|
||||||
if err != nil {
|
|
||||||
s.Logger.Printf("error marshalling shard response: %s", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write to connection.
|
|
||||||
if err := WriteTLV(w, writeShardResponseMessage, buf); err != nil {
|
|
||||||
s.Logger.Printf("write shard response error: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// processMapShardRequest builds a mapper for the requested shard and
// streams its chunks back over w until the mapper is exhausted.
func (s *Service) processMapShardRequest(w io.Writer, buf []byte) error {
	// Decode request
	var req MapShardRequest
	if err := req.UnmarshalBinary(buf); err != nil {
		return err
	}

	m, err := s.TSDBStore.CreateMapper(req.ShardID(), req.Query(), int(req.ChunkSize()))
	if err != nil {
		return fmt.Errorf("create mapper: %s", err)
	}
	if m == nil {
		// No mapper means no local data: reply with a single empty
		// success message.
		return writeMapShardResponseMessage(w, NewMapShardResponse(0, ""))
	}

	if err := m.Open(); err != nil {
		return fmt.Errorf("mapper open: %s", err)
	}
	defer m.Close()

	// Tag-set and field metadata is attached only to the first response.
	var metaSent bool
	for {
		var resp MapShardResponse

		if !metaSent {
			resp.SetTagSets(m.TagSets())
			resp.SetFields(m.Fields())
			metaSent = true
		}

		chunk, err := m.NextChunk()
		if err != nil {
			return fmt.Errorf("next chunk: %s", err)
		}
		if chunk != nil {
			b, err := json.Marshal(chunk)
			if err != nil {
				return fmt.Errorf("encoding: %s", err)
			}
			resp.SetData(b)
		}

		// Write to connection. A nil chunk still produces a final
		// (data-less) response so the client sees end-of-stream.
		resp.SetCode(0)
		if err := writeMapShardResponseMessage(w, &resp); err != nil {
			return err
		}

		if chunk == nil {
			// All mapper data sent.
			return nil
		}
	}
}
|
|
||||||
|
|
||||||
func writeMapShardResponseMessage(w io.Writer, msg *MapShardResponse) error {
|
|
||||||
buf, err := msg.MarshalBinary()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return WriteTLV(w, mapShardResponseMessage, buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadTLV reads a type-length-value record from r.
|
|
||||||
func ReadTLV(r io.Reader) (byte, []byte, error) {
|
|
||||||
var typ [1]byte
|
|
||||||
if _, err := io.ReadFull(r, typ[:]); err != nil {
|
|
||||||
return 0, nil, fmt.Errorf("read message type: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read the size of the message.
|
|
||||||
var sz int64
|
|
||||||
if err := binary.Read(r, binary.BigEndian, &sz); err != nil {
|
|
||||||
return 0, nil, fmt.Errorf("read message size: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if sz == 0 {
|
|
||||||
return 0, nil, fmt.Errorf("invalid message size: %d", sz)
|
|
||||||
}
|
|
||||||
|
|
||||||
if sz >= MaxMessageSize {
|
|
||||||
return 0, nil, fmt.Errorf("max message size of %d exceeded: %d", MaxMessageSize, sz)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read the value.
|
|
||||||
buf := make([]byte, sz)
|
|
||||||
if _, err := io.ReadFull(r, buf); err != nil {
|
|
||||||
return 0, nil, fmt.Errorf("read message value: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return typ[0], buf, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteTLV writes a type-length-value record to w: the type byte,
// the payload length as a big-endian int64, then the payload itself.
func WriteTLV(w io.Writer, typ byte, buf []byte) error {
	// Type byte first.
	if _, err := w.Write([]byte{typ}); err != nil {
		return fmt.Errorf("write message type: %s", err)
	}

	// Then the payload length as a big-endian int64.
	sz := int64(len(buf))
	if err := binary.Write(w, binary.BigEndian, sz); err != nil {
		return fmt.Errorf("write message size: %s", err)
	}

	// Finally the payload.
	if _, err := w.Write(buf); err != nil {
		return fmt.Errorf("write message value: %s", err)
	}

	return nil
}
|
|
|
@ -1,103 +0,0 @@
|
||||||
package cluster_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/cluster"
|
|
||||||
"github.com/influxdb/influxdb/meta"
|
|
||||||
"github.com/influxdb/influxdb/tcp"
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
// metaStore is a stub meta store that resolves every node ID to a
// single fixed host.
type metaStore struct {
	host string
}

// Node returns a NodeInfo for the requested ID pointing at the stub's host.
func (m *metaStore) Node(nodeID uint64) (*meta.NodeInfo, error) {
	return &meta.NodeInfo{
		ID:   nodeID,
		Host: m.host,
	}, nil
}
|
|
||||||
|
|
||||||
// testService is a fake TSDB store whose behavior is supplied through
// function fields, together with the listeners it serves on.
type testService struct {
	nodeID           uint64
	ln               net.Listener // raw TCP listener
	muxln            net.Listener // cluster-header branch of the TCP mux
	writeShardFunc   func(shardID uint64, points []tsdb.Point) error
	createShardFunc  func(database, policy string, shardID uint64) error
	createMapperFunc func(shardID uint64, query string, chunkSize int) (tsdb.Mapper, error)
}
|
|
||||||
|
|
||||||
// newTestWriteService starts a local TCP listener behind a cluster mux
// and returns a testService whose WriteToShard delegates to f.
func newTestWriteService(f func(shardID uint64, points []tsdb.Point) error) testService {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}

	mux := tcp.NewMux()
	muxln := mux.Listen(cluster.MuxHeader)
	go mux.Serve(ln)

	return testService{
		writeShardFunc: f,
		ln:             ln,
		muxln:          muxln,
	}
}
|
|
||||||
|
|
||||||
// Close shuts down the test service's raw listener if it was opened.
func (ts *testService) Close() {
	if ts.ln != nil {
		ts.ln.Close()
	}
}
|
|
||||||
|
|
||||||
// serviceResponses is a collection of captured shard writes.
type serviceResponses []serviceResponse

// serviceResponse records a single WriteToShard call observed by the
// test service.
type serviceResponse struct {
	shardID uint64
	ownerID uint64
	points  []tsdb.Point
}
|
|
||||||
|
|
||||||
// WriteToShard delegates to the configured writeShardFunc.
func (t testService) WriteToShard(shardID uint64, points []tsdb.Point) error {
	return t.writeShardFunc(shardID, points)
}

// CreateShard delegates to the configured createShardFunc.
func (t testService) CreateShard(database, policy string, shardID uint64) error {
	return t.createShardFunc(database, policy, shardID)
}

// CreateMapper delegates to the configured createMapperFunc.
func (t testService) CreateMapper(shardID uint64, query string, chunkSize int) (tsdb.Mapper, error) {
	return t.createMapperFunc(shardID, query, chunkSize)
}
|
|
||||||
|
|
||||||
// writeShardSuccess records the write on the shared responses channel
// and reports success.
func writeShardSuccess(shardID uint64, points []tsdb.Point) error {
	responses <- &serviceResponse{
		shardID: shardID,
		points:  points,
	}
	return nil
}

// writeShardFail always reports a write failure.
func writeShardFail(shardID uint64, points []tsdb.Point) error {
	return fmt.Errorf("failed to write")
}

// responses collects the writes observed by writeShardSuccess.
var responses = make(chan *serviceResponse, 1024)
|
|
||||||
|
|
||||||
// ResponseN drains up to n responses from the shared channel, failing
// when a second passes without a new response arriving.
func (testService) ResponseN(n int) ([]*serviceResponse, error) {
	var a []*serviceResponse
	for {
		select {
		case r := <-responses:
			a = append(a, r)
			if len(a) == n {
				return a, nil
			}
		// NOTE(review): the timer restarts on every iteration, so the
		// total wait can exceed one second when responses trickle in.
		case <-time.After(time.Second):
			return a, fmt.Errorf("unexpected response count: expected: %d, actual: %d", n, len(a))
		}
	}
}
|
|
|
@ -1,207 +0,0 @@
|
||||||
package cluster
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"math/rand"
|
|
||||||
"net"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/meta"
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
"gopkg.in/fatih/pool.v2"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ShardMapper is responsible for providing mappers for requested shards. It is
// responsible for creating those mappers from the local store, or reaching
// out to another node on the cluster.
type ShardMapper struct {
	ForceRemoteMapping bool // All shards treated as remote. Useful for testing.

	// MetaStore supplies this node's ID and resolves peer node info.
	MetaStore interface {
		NodeID() uint64
		Node(id uint64) (ni *meta.NodeInfo, err error)
	}

	// TSDBStore creates mappers for shards that live on this node.
	TSDBStore interface {
		CreateMapper(shardID uint64, query string, chunkSize int) (tsdb.Mapper, error)
	}

	timeout time.Duration // deadline applied to remote shard connections
	pool    *clientPool   // per-node pools of reusable connections
}
|
|
||||||
|
|
||||||
// NewShardMapper returns a mapper of local and remote shards. timeout
// bounds each remote connection's deadline.
func NewShardMapper(timeout time.Duration) *ShardMapper {
	return &ShardMapper{
		pool:    newClientPool(),
		timeout: timeout,
	}
}
|
|
||||||
|
|
||||||
// CreateMapper returns a Mapper for the given shard ID. Shards owned by
// this node are mapped directly through the TSDB store; other shards
// (or all shards when ForceRemoteMapping is set) are mapped over the
// network via a randomly chosen owning node.
func (s *ShardMapper) CreateMapper(sh meta.ShardInfo, stmt string, chunkSize int) (tsdb.Mapper, error) {
	var err error
	var m tsdb.Mapper
	if sh.OwnedBy(s.MetaStore.NodeID()) && !s.ForceRemoteMapping {
		m, err = s.TSDBStore.CreateMapper(sh.ID, stmt, chunkSize)
		if err != nil {
			return nil, err
		}
	} else {
		// Pick a node in a pseudo-random manner.
		conn, err := s.dial(sh.OwnerIDs[rand.Intn(len(sh.OwnerIDs))])
		if err != nil {
			return nil, err
		}
		conn.SetDeadline(time.Now().Add(s.timeout))

		rm := NewRemoteMapper(conn.(*pool.PoolConn), sh.ID, stmt, chunkSize)
		m = rm
	}

	return m, nil
}
|
|
||||||
|
|
||||||
// dial returns a pooled connection to the given node, lazily creating
// the node's connection pool on first use.
func (s *ShardMapper) dial(nodeID uint64) (net.Conn, error) {
	// If we don't have a connection pool for that addr yet, create one.
	// NOTE(review): the check and setPool are not one atomic step, so
	// two concurrent dials may both build a pool; confirm clientPool
	// tolerates the overwrite.
	_, ok := s.pool.getPool(nodeID)
	if !ok {
		factory := &connFactory{nodeID: nodeID, clientPool: s.pool, timeout: s.timeout}
		factory.metaStore = s.MetaStore

		p, err := pool.NewChannelPool(1, 3, factory.dial)
		if err != nil {
			return nil, err
		}
		s.pool.setPool(nodeID, p)
	}
	return s.pool.conn(nodeID)
}
|
|
||||||
|
|
||||||
// remoteShardConn represents a connection to a remote shard that can be
// marked unusable (dropped from its pool) when known to be broken.
type remoteShardConn interface {
	io.ReadWriter
	Close() error
	MarkUnusable()
}
|
|
||||||
|
|
||||||
// RemoteMapper implements the tsdb.Mapper interface. It connects to a remote node,
// sends a query, and interprets the stream of data that comes back.
type RemoteMapper struct {
	shardID   uint64 // shard to map on the remote node
	stmt      string // query statement to execute
	chunkSize int    // requested chunk size for streamed results

	tagsets []string
	fields  []string

	conn remoteShardConn
	// bufferedResponse holds the first response read during Open so
	// NextChunk can still deliver its data payload.
	bufferedResponse *MapShardResponse
}
|
|
||||||
|
|
||||||
// NewRemoteMapper returns a new remote mapper using the given connection.
|
|
||||||
func NewRemoteMapper(c remoteShardConn, shardID uint64, stmt string, chunkSize int) *RemoteMapper {
|
|
||||||
return &RemoteMapper{
|
|
||||||
conn: c,
|
|
||||||
shardID: shardID,
|
|
||||||
stmt: stmt,
|
|
||||||
chunkSize: chunkSize,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open connects to the remote node and starts receiving data.
|
|
||||||
func (r *RemoteMapper) Open() (err error) {
|
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
r.conn.Close()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
// Build Map request.
|
|
||||||
var request MapShardRequest
|
|
||||||
request.SetShardID(r.shardID)
|
|
||||||
request.SetQuery(r.stmt)
|
|
||||||
request.SetChunkSize(int32(r.chunkSize))
|
|
||||||
|
|
||||||
// Marshal into protocol buffers.
|
|
||||||
buf, err := request.MarshalBinary()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write request.
|
|
||||||
if err := WriteTLV(r.conn, mapShardRequestMessage, buf); err != nil {
|
|
||||||
r.conn.MarkUnusable()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read the response.
|
|
||||||
_, buf, err = ReadTLV(r.conn)
|
|
||||||
if err != nil {
|
|
||||||
r.conn.MarkUnusable()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshal response.
|
|
||||||
r.bufferedResponse = &MapShardResponse{}
|
|
||||||
if err := r.bufferedResponse.UnmarshalBinary(buf); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if r.bufferedResponse.Code() != 0 {
|
|
||||||
return fmt.Errorf("error code %d: %s", r.bufferedResponse.Code(), r.bufferedResponse.Message())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode the first response to get the TagSets.
|
|
||||||
r.tagsets = r.bufferedResponse.TagSets()
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TagSets returns the tag sets captured from the first remote response.
func (r *RemoteMapper) TagSets() []string {
	return r.tagsets
}

// Fields returns the field metadata for this mapper.
func (r *RemoteMapper) Fields() []string {
	return r.fields
}
|
|
||||||
|
|
||||||
// NextChunk returns the next chunk read from the remote node to the client.
// The response buffered during Open is consumed first; afterwards each
// call reads one TLV message from the connection. A nil chunk with a
// nil error signals the end of the stream.
func (r *RemoteMapper) NextChunk() (chunk interface{}, err error) {
	output := &tsdb.MapperOutput{}
	var response *MapShardResponse

	if r.bufferedResponse != nil {
		response = r.bufferedResponse
		r.bufferedResponse = nil
	} else {
		response = &MapShardResponse{}

		// Read the response.
		_, buf, err := ReadTLV(r.conn)
		if err != nil {
			r.conn.MarkUnusable()
			return nil, err
		}

		// Unmarshal response.
		if err := response.UnmarshalBinary(buf); err != nil {
			return nil, err
		}

		if response.Code() != 0 {
			return nil, fmt.Errorf("error code %d: %s", response.Code(), response.Message())
		}
	}

	// An empty data payload marks the end of the remote stream.
	if response.Data() == nil {
		return nil, nil
	}
	err = json.Unmarshal(response.Data(), output)
	return output, err
}
|
|
||||||
|
|
||||||
// Close closes the underlying connection to the remote node.
func (r *RemoteMapper) Close() {
	r.conn.Close()
}
|
|
96
Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper_test.go
generated
vendored
96
Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper_test.go
generated
vendored
|
@ -1,96 +0,0 @@
|
||||||
package cluster
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"io"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
// remoteShardResponder implements the remoteShardConn interface.
// It serves canned responses from an in-memory buffer instead of a
// network connection.
type remoteShardResponder struct {
	t       *testing.T
	rxBytes []byte // bytes written by the code under test

	buffer *bytes.Buffer // pre-marshaled responses served from Read
}
|
|
||||||
|
|
||||||
// newRemoteShardResponder returns a responder whose buffer is pre-loaded
// with one marshaled MapShardResponse per output; a nil output produces
// an empty terminating response.
func newRemoteShardResponder(outputs []*tsdb.MapperOutput, tagsets []string) *remoteShardResponder {
	r := &remoteShardResponder{}
	a := make([]byte, 0, 1024)
	r.buffer = bytes.NewBuffer(a)

	// Pump the outputs in the buffer for later reading.
	for _, o := range outputs {
		resp := &MapShardResponse{}
		resp.SetCode(0)
		if o != nil {
			d, _ := json.Marshal(o)
			resp.SetData(d)
			resp.SetTagSets(tagsets)
		}

		g, _ := resp.MarshalBinary()
		WriteTLV(r.buffer, mapShardResponseMessage, g)
	}

	return r
}
|
|
||||||
|
|
||||||
func (r remoteShardResponder) MarkUnusable() { return }
|
|
||||||
func (r remoteShardResponder) Close() error { return nil }
|
|
||||||
func (r remoteShardResponder) Read(p []byte) (n int, err error) {
|
|
||||||
return io.ReadFull(r.buffer, p)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r remoteShardResponder) Write(p []byte) (n int, err error) {
|
|
||||||
if r.rxBytes == nil {
|
|
||||||
r.rxBytes = make([]byte, 0)
|
|
||||||
}
|
|
||||||
r.rxBytes = append(r.rxBytes, p...)
|
|
||||||
return len(p), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure a RemoteMapper can process valid responses from a remote shard.
func TestShardWriter_RemoteMapper_Success(t *testing.T) {
	expTagSets := []string{"tagsetA"}
	expOutput := &tsdb.MapperOutput{
		Name: "cpu",
		Tags: map[string]string{"host": "serverA"},
	}

	// One data-bearing response followed by a nil terminator.
	c := newRemoteShardResponder([]*tsdb.MapperOutput{expOutput, nil}, expTagSets)

	r := NewRemoteMapper(c, 1234, "SELECT * FROM CPU", 10)
	if err := r.Open(); err != nil {
		t.Fatalf("failed to open remote mapper: %s", err.Error())
	}

	if r.TagSets()[0] != expTagSets[0] {
		t.Fatalf("incorrect tagsets received, exp %v, got %v", expTagSets, r.TagSets())
	}

	// Get first chunk from mapper.
	chunk, err := r.NextChunk()
	if err != nil {
		t.Fatalf("failed to get next chunk from mapper: %s", err.Error())
	}
	output, ok := chunk.(*tsdb.MapperOutput)
	if !ok {
		t.Fatal("chunk is not of expected type")
	}
	if output.Name != "cpu" {
		t.Fatalf("received output incorrect, exp: %v, got %v", expOutput, output)
	}

	// Next chunk should be nil, indicating no more data.
	chunk, err = r.NextChunk()
	if err != nil {
		t.Fatalf("failed to get next chunk from mapper: %s", err.Error())
	}
	if chunk != nil {
		t.Fatal("received more chunks when none expected")
	}
}
|
|
|
@ -1,163 +0,0 @@
|
||||||
package cluster
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/meta"
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
"gopkg.in/fatih/pool.v2"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Message types used on the cluster's TCP wire protocol. Values start
// at 1 so a zero byte never matches a valid message type.
const (
	writeShardRequestMessage byte = iota + 1
	writeShardResponseMessage
	mapShardRequestMessage
	mapShardResponseMessage
)
|
|
||||||
|
|
||||||
// ShardWriter writes a set of points to a shard.
|
|
||||||
type ShardWriter struct {
|
|
||||||
pool *clientPool
|
|
||||||
timeout time.Duration
|
|
||||||
|
|
||||||
MetaStore interface {
|
|
||||||
Node(id uint64) (ni *meta.NodeInfo, err error)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewShardWriter returns a new instance of ShardWriter.
|
|
||||||
func NewShardWriter(timeout time.Duration) *ShardWriter {
|
|
||||||
return &ShardWriter{
|
|
||||||
pool: newClientPool(),
|
|
||||||
timeout: timeout,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *ShardWriter) WriteShard(shardID, ownerID uint64, points []tsdb.Point) error {
|
|
||||||
c, err := w.dial(ownerID)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
conn, ok := c.(*pool.PoolConn)
|
|
||||||
if !ok {
|
|
||||||
panic("wrong connection type")
|
|
||||||
}
|
|
||||||
defer func(conn net.Conn) {
|
|
||||||
conn.Close() // return to pool
|
|
||||||
}(conn)
|
|
||||||
|
|
||||||
// Build write request.
|
|
||||||
var request WriteShardRequest
|
|
||||||
request.SetShardID(shardID)
|
|
||||||
request.AddPoints(points)
|
|
||||||
|
|
||||||
// Marshal into protocol buffers.
|
|
||||||
buf, err := request.MarshalBinary()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write request.
|
|
||||||
conn.SetWriteDeadline(time.Now().Add(w.timeout))
|
|
||||||
if err := WriteTLV(conn, writeShardRequestMessage, buf); err != nil {
|
|
||||||
conn.MarkUnusable()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read the response.
|
|
||||||
conn.SetReadDeadline(time.Now().Add(w.timeout))
|
|
||||||
_, buf, err = ReadTLV(conn)
|
|
||||||
if err != nil {
|
|
||||||
conn.MarkUnusable()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshal response.
|
|
||||||
var response WriteShardResponse
|
|
||||||
if err := response.UnmarshalBinary(buf); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if response.Code() != 0 {
|
|
||||||
return fmt.Errorf("error code %d: %s", response.Code(), response.Message())
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *ShardWriter) dial(nodeID uint64) (net.Conn, error) {
|
|
||||||
// If we don't have a connection pool for that addr yet, create one
|
|
||||||
_, ok := c.pool.getPool(nodeID)
|
|
||||||
if !ok {
|
|
||||||
factory := &connFactory{nodeID: nodeID, clientPool: c.pool, timeout: c.timeout}
|
|
||||||
factory.metaStore = c.MetaStore
|
|
||||||
|
|
||||||
p, err := pool.NewChannelPool(1, 3, factory.dial)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
c.pool.setPool(nodeID, p)
|
|
||||||
}
|
|
||||||
return c.pool.conn(nodeID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *ShardWriter) Close() error {
|
|
||||||
if w.pool == nil {
|
|
||||||
return fmt.Errorf("client already closed")
|
|
||||||
}
|
|
||||||
w.pool.close()
|
|
||||||
w.pool = nil
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
maxConnections = 500
|
|
||||||
maxRetries = 3
|
|
||||||
)
|
|
||||||
|
|
||||||
var errMaxConnectionsExceeded = fmt.Errorf("can not exceed max connections of %d", maxConnections)
|
|
||||||
|
|
||||||
type connFactory struct {
|
|
||||||
nodeID uint64
|
|
||||||
timeout time.Duration
|
|
||||||
|
|
||||||
clientPool interface {
|
|
||||||
size() int
|
|
||||||
}
|
|
||||||
|
|
||||||
metaStore interface {
|
|
||||||
Node(id uint64) (ni *meta.NodeInfo, err error)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *connFactory) dial() (net.Conn, error) {
|
|
||||||
if c.clientPool.size() > maxConnections {
|
|
||||||
return nil, errMaxConnectionsExceeded
|
|
||||||
}
|
|
||||||
|
|
||||||
ni, err := c.metaStore.Node(c.nodeID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if ni == nil {
|
|
||||||
return nil, fmt.Errorf("node %d does not exist", c.nodeID)
|
|
||||||
}
|
|
||||||
|
|
||||||
conn, err := net.DialTimeout("tcp", ni.Host, c.timeout)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write a marker byte for cluster messages.
|
|
||||||
_, err = conn.Write([]byte{MuxHeader})
|
|
||||||
if err != nil {
|
|
||||||
conn.Close()
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return conn, nil
|
|
||||||
}
|
|
186
Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer_test.go
generated
vendored
186
Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer_test.go
generated
vendored
|
@ -1,186 +0,0 @@
|
||||||
package cluster_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/cluster"
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Ensure the shard writer can successful write a single request.
|
|
||||||
func TestShardWriter_WriteShard_Success(t *testing.T) {
|
|
||||||
ts := newTestWriteService(writeShardSuccess)
|
|
||||||
s := cluster.NewService(cluster.Config{})
|
|
||||||
s.Listener = ts.muxln
|
|
||||||
s.TSDBStore = ts
|
|
||||||
if err := s.Open(); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer s.Close()
|
|
||||||
defer ts.Close()
|
|
||||||
|
|
||||||
w := cluster.NewShardWriter(time.Minute)
|
|
||||||
w.MetaStore = &metaStore{host: ts.ln.Addr().String()}
|
|
||||||
|
|
||||||
// Build a single point.
|
|
||||||
now := time.Now()
|
|
||||||
var points []tsdb.Point
|
|
||||||
points = append(points, tsdb.NewPoint("cpu", tsdb.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now))
|
|
||||||
|
|
||||||
// Write to shard and close.
|
|
||||||
if err := w.WriteShard(1, 2, points); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
} else if err := w.Close(); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate response.
|
|
||||||
responses, err := ts.ResponseN(1)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
} else if responses[0].shardID != 1 {
|
|
||||||
t.Fatalf("unexpected shard id: %d", responses[0].shardID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate point.
|
|
||||||
if p := responses[0].points[0]; p.Name() != "cpu" {
|
|
||||||
t.Fatalf("unexpected name: %s", p.Name())
|
|
||||||
} else if p.Fields()["value"] != int64(100) {
|
|
||||||
t.Fatalf("unexpected 'value' field: %d", p.Fields()["value"])
|
|
||||||
} else if p.Tags()["host"] != "server01" {
|
|
||||||
t.Fatalf("unexpected 'host' tag: %s", p.Tags()["host"])
|
|
||||||
} else if p.Time().UnixNano() != now.UnixNano() {
|
|
||||||
t.Fatalf("unexpected time: %s", p.Time())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure the shard writer can successful write a multiple requests.
|
|
||||||
func TestShardWriter_WriteShard_Multiple(t *testing.T) {
|
|
||||||
ts := newTestWriteService(writeShardSuccess)
|
|
||||||
s := cluster.NewService(cluster.Config{})
|
|
||||||
s.Listener = ts.muxln
|
|
||||||
s.TSDBStore = ts
|
|
||||||
if err := s.Open(); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer s.Close()
|
|
||||||
defer ts.Close()
|
|
||||||
|
|
||||||
w := cluster.NewShardWriter(time.Minute)
|
|
||||||
w.MetaStore = &metaStore{host: ts.ln.Addr().String()}
|
|
||||||
|
|
||||||
// Build a single point.
|
|
||||||
now := time.Now()
|
|
||||||
var points []tsdb.Point
|
|
||||||
points = append(points, tsdb.NewPoint("cpu", tsdb.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now))
|
|
||||||
|
|
||||||
// Write to shard twice and close.
|
|
||||||
if err := w.WriteShard(1, 2, points); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
} else if err := w.WriteShard(1, 2, points); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
} else if err := w.Close(); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate response.
|
|
||||||
responses, err := ts.ResponseN(1)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
} else if responses[0].shardID != 1 {
|
|
||||||
t.Fatalf("unexpected shard id: %d", responses[0].shardID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate point.
|
|
||||||
if p := responses[0].points[0]; p.Name() != "cpu" {
|
|
||||||
t.Fatalf("unexpected name: %s", p.Name())
|
|
||||||
} else if p.Fields()["value"] != int64(100) {
|
|
||||||
t.Fatalf("unexpected 'value' field: %d", p.Fields()["value"])
|
|
||||||
} else if p.Tags()["host"] != "server01" {
|
|
||||||
t.Fatalf("unexpected 'host' tag: %s", p.Tags()["host"])
|
|
||||||
} else if p.Time().UnixNano() != now.UnixNano() {
|
|
||||||
t.Fatalf("unexpected time: %s", p.Time())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure the shard writer returns an error when the server fails to accept the write.
|
|
||||||
func TestShardWriter_WriteShard_Error(t *testing.T) {
|
|
||||||
ts := newTestWriteService(writeShardFail)
|
|
||||||
s := cluster.NewService(cluster.Config{})
|
|
||||||
s.Listener = ts.muxln
|
|
||||||
s.TSDBStore = ts
|
|
||||||
if err := s.Open(); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer s.Close()
|
|
||||||
defer ts.Close()
|
|
||||||
|
|
||||||
w := cluster.NewShardWriter(time.Minute)
|
|
||||||
w.MetaStore = &metaStore{host: ts.ln.Addr().String()}
|
|
||||||
now := time.Now()
|
|
||||||
|
|
||||||
shardID := uint64(1)
|
|
||||||
ownerID := uint64(2)
|
|
||||||
var points []tsdb.Point
|
|
||||||
points = append(points, tsdb.NewPoint(
|
|
||||||
"cpu", tsdb.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now,
|
|
||||||
))
|
|
||||||
|
|
||||||
if err := w.WriteShard(shardID, ownerID, points); err == nil || err.Error() != "error code 1: write shard 1: failed to write" {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure the shard writer returns an error when dialing times out.
|
|
||||||
func TestShardWriter_Write_ErrDialTimeout(t *testing.T) {
|
|
||||||
ts := newTestWriteService(writeShardSuccess)
|
|
||||||
s := cluster.NewService(cluster.Config{})
|
|
||||||
s.Listener = ts.muxln
|
|
||||||
s.TSDBStore = ts
|
|
||||||
if err := s.Open(); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer s.Close()
|
|
||||||
defer ts.Close()
|
|
||||||
|
|
||||||
w := cluster.NewShardWriter(time.Nanosecond)
|
|
||||||
w.MetaStore = &metaStore{host: ts.ln.Addr().String()}
|
|
||||||
now := time.Now()
|
|
||||||
|
|
||||||
shardID := uint64(1)
|
|
||||||
ownerID := uint64(2)
|
|
||||||
var points []tsdb.Point
|
|
||||||
points = append(points, tsdb.NewPoint(
|
|
||||||
"cpu", tsdb.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now,
|
|
||||||
))
|
|
||||||
|
|
||||||
if err, exp := w.WriteShard(shardID, ownerID, points), "i/o timeout"; err == nil || !strings.Contains(err.Error(), exp) {
|
|
||||||
t.Fatalf("expected error %v, to contain %s", err, exp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure the shard writer returns an error when reading times out.
|
|
||||||
func TestShardWriter_Write_ErrReadTimeout(t *testing.T) {
|
|
||||||
ln, err := net.Listen("tcp", "127.0.0.1:0")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
w := cluster.NewShardWriter(time.Millisecond)
|
|
||||||
w.MetaStore = &metaStore{host: ln.Addr().String()}
|
|
||||||
now := time.Now()
|
|
||||||
|
|
||||||
shardID := uint64(1)
|
|
||||||
ownerID := uint64(2)
|
|
||||||
var points []tsdb.Point
|
|
||||||
points = append(points, tsdb.NewPoint(
|
|
||||||
"cpu", tsdb.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now,
|
|
||||||
))
|
|
||||||
|
|
||||||
if err := w.WriteShard(shardID, ownerID, points); err == nil || !strings.Contains(err.Error(), "i/o timeout") {
|
|
||||||
t.Fatalf("unexpected error: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,724 +0,0 @@
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/csv"
|
|
||||||
"encoding/json"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"os/user"
|
|
||||||
"path/filepath"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"text/tabwriter"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/client"
|
|
||||||
"github.com/influxdb/influxdb/importer/v8"
|
|
||||||
"github.com/peterh/liner"
|
|
||||||
)
|
|
||||||
|
|
||||||
// These variables are populated via the Go linker.
|
|
||||||
var (
|
|
||||||
version string = "0.9"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// defaultFormat is the default format of the results when issuing queries
|
|
||||||
defaultFormat = "column"
|
|
||||||
|
|
||||||
// defaultPPS is the default points per second that the import will throttle at
|
|
||||||
// by default it's 0, which means it will not throttle
|
|
||||||
defaultPPS = 0
|
|
||||||
)
|
|
||||||
|
|
||||||
type CommandLine struct {
|
|
||||||
Client *client.Client
|
|
||||||
Line *liner.State
|
|
||||||
Host string
|
|
||||||
Port int
|
|
||||||
Username string
|
|
||||||
Password string
|
|
||||||
Database string
|
|
||||||
Ssl bool
|
|
||||||
RetentionPolicy string
|
|
||||||
Version string
|
|
||||||
Pretty bool // controls pretty print for json
|
|
||||||
Format string // controls the output format. Valid values are json, csv, or column
|
|
||||||
Execute string
|
|
||||||
ShowVersion bool
|
|
||||||
Import bool
|
|
||||||
PPS int // Controls how many points per second the import will allow via throttling
|
|
||||||
Path string
|
|
||||||
Compressed bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
c := CommandLine{}
|
|
||||||
|
|
||||||
fs := flag.NewFlagSet("InfluxDB shell version "+version, flag.ExitOnError)
|
|
||||||
fs.StringVar(&c.Host, "host", client.DefaultHost, "Influxdb host to connect to.")
|
|
||||||
fs.IntVar(&c.Port, "port", client.DefaultPort, "Influxdb port to connect to.")
|
|
||||||
fs.StringVar(&c.Username, "username", c.Username, "Username to connect to the server.")
|
|
||||||
fs.StringVar(&c.Password, "password", c.Password, `Password to connect to the server. Leaving blank will prompt for password (--password="").`)
|
|
||||||
fs.StringVar(&c.Database, "database", c.Database, "Database to connect to the server.")
|
|
||||||
fs.BoolVar(&c.Ssl, "ssl", false, "Use https for connecting to cluster.")
|
|
||||||
fs.StringVar(&c.Format, "format", defaultFormat, "Format specifies the format of the server responses: json, csv, or column.")
|
|
||||||
fs.BoolVar(&c.Pretty, "pretty", false, "Turns on pretty print for the json format.")
|
|
||||||
fs.StringVar(&c.Execute, "execute", c.Execute, "Execute command and quit.")
|
|
||||||
fs.BoolVar(&c.ShowVersion, "version", false, "Displays the InfluxDB version.")
|
|
||||||
fs.BoolVar(&c.Import, "import", false, "Import a previous database.")
|
|
||||||
fs.IntVar(&c.PPS, "pps", defaultPPS, "How many points per second the import will allow. By default it is zero and will not throttle importing.")
|
|
||||||
fs.StringVar(&c.Path, "path", "", "path to the file to import")
|
|
||||||
fs.BoolVar(&c.Compressed, "compressed", false, "set to true if the import file is compressed")
|
|
||||||
|
|
||||||
// Define our own custom usage to print
|
|
||||||
fs.Usage = func() {
|
|
||||||
fmt.Println(`Usage of influx:
|
|
||||||
-version
|
|
||||||
Display the version and exit.
|
|
||||||
-host 'host name'
|
|
||||||
Host to connect to.
|
|
||||||
-port 'port #'
|
|
||||||
Port to connect to.
|
|
||||||
-database 'database name'
|
|
||||||
Database to connect to the server.
|
|
||||||
-password 'password'
|
|
||||||
Password to connect to the server. Leaving blank will prompt for password (--password '').
|
|
||||||
-username 'username'
|
|
||||||
Username to connect to the server.
|
|
||||||
-ssl
|
|
||||||
Use https for requests.
|
|
||||||
-execute 'command'
|
|
||||||
Execute command and quit.
|
|
||||||
-format 'json|csv|column'
|
|
||||||
Format specifies the format of the server responses: json, csv, or column.
|
|
||||||
-pretty
|
|
||||||
Turns on pretty print for the json format.
|
|
||||||
-import
|
|
||||||
Import a previous database export from file
|
|
||||||
-pps
|
|
||||||
How many points per second the import will allow. By default it is zero and will not throttle importing.
|
|
||||||
-path
|
|
||||||
Path to file to import
|
|
||||||
-compressed
|
|
||||||
Set to true if the import file is compressed
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
|
|
||||||
# Use influx in a non-interactive mode to query the database "metrics" and pretty print json:
|
|
||||||
$ influx -database 'metrics' -execute 'select * from cpu' -format 'json' -pretty
|
|
||||||
|
|
||||||
# Connect to a specific database on startup and set database context:
|
|
||||||
$ influx -database 'metrics' -host 'localhost' -port '8086'
|
|
||||||
`)
|
|
||||||
}
|
|
||||||
fs.Parse(os.Args[1:])
|
|
||||||
|
|
||||||
if c.ShowVersion {
|
|
||||||
showVersion()
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
var promptForPassword bool
|
|
||||||
// determine if they set the password flag but provided no value
|
|
||||||
for _, v := range os.Args {
|
|
||||||
v = strings.ToLower(v)
|
|
||||||
if (strings.HasPrefix(v, "-password") || strings.HasPrefix(v, "--password")) && c.Password == "" {
|
|
||||||
promptForPassword = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
c.Line = liner.NewLiner()
|
|
||||||
defer c.Line.Close()
|
|
||||||
|
|
||||||
if promptForPassword {
|
|
||||||
p, e := c.Line.PasswordPrompt("password: ")
|
|
||||||
if e != nil {
|
|
||||||
fmt.Println("Unable to parse password.")
|
|
||||||
} else {
|
|
||||||
c.Password = p
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := c.connect(""); err != nil {
|
|
||||||
|
|
||||||
}
|
|
||||||
if c.Execute == "" && !c.Import {
|
|
||||||
fmt.Printf("Connected to %s version %s\n", c.Client.Addr(), c.Version)
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.Execute != "" {
|
|
||||||
if err := c.ExecuteQuery(c.Execute); err != nil {
|
|
||||||
c.Line.Close()
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
c.Line.Close()
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.Import {
|
|
||||||
path := net.JoinHostPort(c.Host, strconv.Itoa(c.Port))
|
|
||||||
u, e := client.ParseConnectionString(path, c.Ssl)
|
|
||||||
if e != nil {
|
|
||||||
fmt.Println(e)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
config := v8.NewConfig()
|
|
||||||
config.Username = c.Username
|
|
||||||
config.Password = c.Password
|
|
||||||
config.Precision = "ns"
|
|
||||||
config.WriteConsistency = "any"
|
|
||||||
config.Path = c.Path
|
|
||||||
config.Version = version
|
|
||||||
config.URL = u
|
|
||||||
config.Compressed = c.Compressed
|
|
||||||
config.PPS = c.PPS
|
|
||||||
|
|
||||||
i := v8.NewImporter(config)
|
|
||||||
if err := i.Import(); err != nil {
|
|
||||||
fmt.Printf("ERROR: %s\n", err)
|
|
||||||
c.Line.Close()
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
c.Line.Close()
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
showVersion()
|
|
||||||
|
|
||||||
var historyFile string
|
|
||||||
usr, err := user.Current()
|
|
||||||
// Only load history if we can get the user
|
|
||||||
if err == nil {
|
|
||||||
historyFile = filepath.Join(usr.HomeDir, ".influx_history")
|
|
||||||
|
|
||||||
if f, err := os.Open(historyFile); err == nil {
|
|
||||||
c.Line.ReadHistory(f)
|
|
||||||
f.Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for {
|
|
||||||
l, e := c.Line.Prompt("> ")
|
|
||||||
if e != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if c.ParseCommand(l) {
|
|
||||||
// write out the history
|
|
||||||
if len(historyFile) > 0 {
|
|
||||||
c.Line.AppendHistory(l)
|
|
||||||
if f, err := os.Create(historyFile); err == nil {
|
|
||||||
c.Line.WriteHistory(f)
|
|
||||||
f.Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
break // exit main loop
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func showVersion() {
|
|
||||||
fmt.Println("InfluxDB shell " + version)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CommandLine) ParseCommand(cmd string) bool {
|
|
||||||
lcmd := strings.TrimSpace(strings.ToLower(cmd))
|
|
||||||
switch {
|
|
||||||
case strings.HasPrefix(lcmd, "exit"):
|
|
||||||
// signal the program to exit
|
|
||||||
return false
|
|
||||||
case strings.HasPrefix(lcmd, "gopher"):
|
|
||||||
c.gopher()
|
|
||||||
case strings.HasPrefix(lcmd, "connect"):
|
|
||||||
c.connect(cmd)
|
|
||||||
case strings.HasPrefix(lcmd, "auth"):
|
|
||||||
c.SetAuth(cmd)
|
|
||||||
case strings.HasPrefix(lcmd, "help"):
|
|
||||||
c.help()
|
|
||||||
case strings.HasPrefix(lcmd, "format"):
|
|
||||||
c.SetFormat(cmd)
|
|
||||||
case strings.HasPrefix(lcmd, "settings"):
|
|
||||||
c.Settings()
|
|
||||||
case strings.HasPrefix(lcmd, "pretty"):
|
|
||||||
c.Pretty = !c.Pretty
|
|
||||||
if c.Pretty {
|
|
||||||
fmt.Println("Pretty print enabled")
|
|
||||||
} else {
|
|
||||||
fmt.Println("Pretty print disabled")
|
|
||||||
}
|
|
||||||
case strings.HasPrefix(lcmd, "use"):
|
|
||||||
c.use(cmd)
|
|
||||||
case strings.HasPrefix(lcmd, "insert"):
|
|
||||||
c.Insert(cmd)
|
|
||||||
case lcmd == "":
|
|
||||||
break
|
|
||||||
default:
|
|
||||||
c.ExecuteQuery(cmd)
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CommandLine) connect(cmd string) error {
|
|
||||||
var cl *client.Client
|
|
||||||
var u url.URL
|
|
||||||
|
|
||||||
// Remove the "connect" keyword if it exists
|
|
||||||
path := strings.TrimSpace(strings.Replace(cmd, "connect", "", -1))
|
|
||||||
|
|
||||||
// If they didn't provide a connection string, use the current settings
|
|
||||||
if path == "" {
|
|
||||||
path = net.JoinHostPort(c.Host, strconv.Itoa(c.Port))
|
|
||||||
}
|
|
||||||
|
|
||||||
var e error
|
|
||||||
u, e = client.ParseConnectionString(path, c.Ssl)
|
|
||||||
if e != nil {
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
config := client.NewConfig()
|
|
||||||
config.URL = u
|
|
||||||
config.Username = c.Username
|
|
||||||
config.Password = c.Password
|
|
||||||
config.UserAgent = "InfluxDBShell/" + version
|
|
||||||
cl, err := client.NewClient(config)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Could not create client %s", err)
|
|
||||||
}
|
|
||||||
c.Client = cl
|
|
||||||
if _, v, e := c.Client.Ping(); e != nil {
|
|
||||||
return fmt.Errorf("Failed to connect to %s\n", c.Client.Addr())
|
|
||||||
} else {
|
|
||||||
c.Version = v
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CommandLine) SetAuth(cmd string) {
|
|
||||||
// If they pass in the entire command, we should parse it
|
|
||||||
// auth <username> <password>
|
|
||||||
args := strings.Fields(cmd)
|
|
||||||
if len(args) == 3 {
|
|
||||||
args = args[1:]
|
|
||||||
} else {
|
|
||||||
args = []string{}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(args) == 2 {
|
|
||||||
c.Username = args[0]
|
|
||||||
c.Password = args[1]
|
|
||||||
} else {
|
|
||||||
u, e := c.Line.Prompt("username: ")
|
|
||||||
if e != nil {
|
|
||||||
fmt.Printf("Unable to process input: %s", e)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
c.Username = strings.TrimSpace(u)
|
|
||||||
p, e := c.Line.PasswordPrompt("password: ")
|
|
||||||
if e != nil {
|
|
||||||
fmt.Printf("Unable to process input: %s", e)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
c.Password = p
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update the client as well
|
|
||||||
c.Client.SetAuth(c.Username, c.Password)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CommandLine) use(cmd string) {
|
|
||||||
args := strings.Split(strings.TrimSuffix(strings.TrimSpace(cmd), ";"), " ")
|
|
||||||
if len(args) != 2 {
|
|
||||||
fmt.Printf("Could not parse database name from %q.\n", cmd)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
d := args[1]
|
|
||||||
c.Database = d
|
|
||||||
fmt.Printf("Using database %s\n", d)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CommandLine) SetFormat(cmd string) {
|
|
||||||
// Remove the "format" keyword if it exists
|
|
||||||
cmd = strings.TrimSpace(strings.Replace(cmd, "format", "", -1))
|
|
||||||
// normalize cmd
|
|
||||||
cmd = strings.ToLower(cmd)
|
|
||||||
|
|
||||||
switch cmd {
|
|
||||||
case "json", "csv", "column":
|
|
||||||
c.Format = cmd
|
|
||||||
default:
|
|
||||||
fmt.Printf("Unknown format %q. Please use json, csv, or column.\n", cmd)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// isWhitespace returns true if the rune is a space, tab, or newline.
|
|
||||||
func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' }
|
|
||||||
|
|
||||||
// isLetter returns true if the rune is a letter.
|
|
||||||
func isLetter(ch rune) bool { return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') }
|
|
||||||
|
|
||||||
// isDigit returns true if the rune is a digit.
|
|
||||||
func isDigit(ch rune) bool { return (ch >= '0' && ch <= '9') }
|
|
||||||
|
|
||||||
// isIdentFirstChar returns true if the rune can be used as the first char in an unquoted identifer.
|
|
||||||
func isIdentFirstChar(ch rune) bool { return isLetter(ch) || ch == '_' }
|
|
||||||
|
|
||||||
// isIdentChar returns true if the rune can be used in an unquoted identifier.
|
|
||||||
func isNotIdentChar(ch rune) bool { return !(isLetter(ch) || isDigit(ch) || ch == '_') }
|
|
||||||
|
|
||||||
func parseUnquotedIdentifier(stmt string) (string, string) {
|
|
||||||
if fields := strings.FieldsFunc(stmt, isNotIdentChar); len(fields) > 0 {
|
|
||||||
return fields[0], strings.TrimPrefix(stmt, fields[0])
|
|
||||||
}
|
|
||||||
return "", stmt
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseDoubleQuotedIdentifier(stmt string) (string, string) {
|
|
||||||
escapeNext := false
|
|
||||||
fields := strings.FieldsFunc(stmt, func(ch rune) bool {
|
|
||||||
if ch == '\\' {
|
|
||||||
escapeNext = true
|
|
||||||
} else if ch == '"' {
|
|
||||||
if !escapeNext {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
escapeNext = false
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
})
|
|
||||||
if len(fields) > 0 {
|
|
||||||
return fields[0], strings.TrimPrefix(stmt, "\""+fields[0]+"\"")
|
|
||||||
}
|
|
||||||
return "", stmt
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseNextIdentifier(stmt string) (ident, remainder string) {
|
|
||||||
if len(stmt) > 0 {
|
|
||||||
switch {
|
|
||||||
case isWhitespace(rune(stmt[0])):
|
|
||||||
return parseNextIdentifier(stmt[1:])
|
|
||||||
case isIdentFirstChar(rune(stmt[0])):
|
|
||||||
return parseUnquotedIdentifier(stmt)
|
|
||||||
case stmt[0] == '"':
|
|
||||||
return parseDoubleQuotedIdentifier(stmt)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "", stmt
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CommandLine) parseInto(stmt string) string {
|
|
||||||
ident, stmt := parseNextIdentifier(stmt)
|
|
||||||
if strings.HasPrefix(stmt, ".") {
|
|
||||||
c.Database = ident
|
|
||||||
fmt.Printf("Using database %s\n", c.Database)
|
|
||||||
ident, stmt = parseNextIdentifier(stmt[1:])
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(stmt, " ") {
|
|
||||||
c.RetentionPolicy = ident
|
|
||||||
fmt.Printf("Using retention policy %s\n", c.RetentionPolicy)
|
|
||||||
return stmt[1:]
|
|
||||||
}
|
|
||||||
return stmt
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CommandLine) Insert(stmt string) error {
|
|
||||||
i, point := parseNextIdentifier(stmt)
|
|
||||||
if !strings.EqualFold(i, "insert") {
|
|
||||||
fmt.Printf("ERR: found %s, expected INSERT\n", i)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if i, r := parseNextIdentifier(point); strings.EqualFold(i, "into") {
|
|
||||||
point = c.parseInto(r)
|
|
||||||
}
|
|
||||||
_, err := c.Client.Write(client.BatchPoints{
|
|
||||||
Points: []client.Point{
|
|
||||||
client.Point{Raw: point},
|
|
||||||
},
|
|
||||||
Database: c.Database,
|
|
||||||
RetentionPolicy: c.RetentionPolicy,
|
|
||||||
Precision: "n",
|
|
||||||
WriteConsistency: client.ConsistencyAny,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
fmt.Printf("ERR: %s\n", err)
|
|
||||||
if c.Database == "" {
|
|
||||||
fmt.Println("Note: error may be due to not setting a database or retention policy.")
|
|
||||||
fmt.Println(`Please set a database with the command "use <database>" or`)
|
|
||||||
fmt.Println("INSERT INTO <database>.<retention-policy> <point>")
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CommandLine) ExecuteQuery(query string) error {
|
|
||||||
response, err := c.Client.Query(client.Query{Command: query, Database: c.Database})
|
|
||||||
if err != nil {
|
|
||||||
fmt.Printf("ERR: %s\n", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
c.FormatResponse(response, os.Stdout)
|
|
||||||
if err := response.Error(); err != nil {
|
|
||||||
fmt.Printf("ERR: %s\n", response.Error())
|
|
||||||
if c.Database == "" {
|
|
||||||
fmt.Println("Warning: It is possible this error is due to not setting a database.")
|
|
||||||
fmt.Println(`Please set a database with the command "use <database>".`)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CommandLine) FormatResponse(response *client.Response, w io.Writer) {
|
|
||||||
switch c.Format {
|
|
||||||
case "json":
|
|
||||||
c.writeJSON(response, w)
|
|
||||||
case "csv":
|
|
||||||
c.writeCSV(response, w)
|
|
||||||
case "column":
|
|
||||||
c.writeColumns(response, w)
|
|
||||||
default:
|
|
||||||
fmt.Fprintf(w, "Unknown output format %q.\n", c.Format)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CommandLine) writeJSON(response *client.Response, w io.Writer) {
|
|
||||||
var data []byte
|
|
||||||
var err error
|
|
||||||
if c.Pretty {
|
|
||||||
data, err = json.MarshalIndent(response, "", " ")
|
|
||||||
} else {
|
|
||||||
data, err = json.Marshal(response)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(w, "Unable to parse json: %s\n", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
fmt.Fprintln(w, string(data))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CommandLine) writeCSV(response *client.Response, w io.Writer) {
|
|
||||||
csvw := csv.NewWriter(w)
|
|
||||||
for _, result := range response.Results {
|
|
||||||
// Create a tabbed writer for each result as they won't always line up
|
|
||||||
rows := c.formatResults(result, "\t")
|
|
||||||
for _, r := range rows {
|
|
||||||
csvw.Write(strings.Split(r, "\t"))
|
|
||||||
}
|
|
||||||
csvw.Flush()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *CommandLine) writeColumns(response *client.Response, w io.Writer) {
|
|
||||||
for _, result := range response.Results {
|
|
||||||
// Create a tabbed writer for each result a they won't always line up
|
|
||||||
w := new(tabwriter.Writer)
|
|
||||||
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
|
|
||||||
csv := c.formatResults(result, "\t")
|
|
||||||
for _, r := range csv {
|
|
||||||
fmt.Fprintln(w, r)
|
|
||||||
}
|
|
||||||
w.Flush()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// formatResults will behave differently if you are formatting for columns or csv
|
|
||||||
func (c *CommandLine) formatResults(result client.Result, separator string) []string {
|
|
||||||
rows := []string{}
|
|
||||||
// Create a tabbed writer for each result a they won't always line up
|
|
||||||
for i, row := range result.Series {
|
|
||||||
// gather tags
|
|
||||||
tags := []string{}
|
|
||||||
for k, v := range row.Tags {
|
|
||||||
tags = append(tags, fmt.Sprintf("%s=%s", k, v))
|
|
||||||
sort.Strings(tags)
|
|
||||||
}
|
|
||||||
|
|
||||||
columnNames := []string{}
|
|
||||||
|
|
||||||
// Only put name/tags in a column if format is csv
|
|
||||||
if c.Format == "csv" {
|
|
||||||
if len(tags) > 0 {
|
|
||||||
columnNames = append([]string{"tags"}, columnNames...)
|
|
||||||
}
|
|
||||||
|
|
||||||
if row.Name != "" {
|
|
||||||
columnNames = append([]string{"name"}, columnNames...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, column := range row.Columns {
|
|
||||||
columnNames = append(columnNames, column)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Output a line separator if we have more than one set or results and format is column
|
|
||||||
if i > 0 && c.Format == "column" {
|
|
||||||
rows = append(rows, "")
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we are column format, we break out the name/tag to seperate lines
|
|
||||||
if c.Format == "column" {
|
|
||||||
if row.Name != "" {
|
|
||||||
n := fmt.Sprintf("name: %s", row.Name)
|
|
||||||
rows = append(rows, n)
|
|
||||||
if len(tags) == 0 {
|
|
||||||
l := strings.Repeat("-", len(n))
|
|
||||||
rows = append(rows, l)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(tags) > 0 {
|
|
||||||
t := fmt.Sprintf("tags: %s", (strings.Join(tags, ", ")))
|
|
||||||
rows = append(rows, t)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
rows = append(rows, strings.Join(columnNames, separator))
|
|
||||||
|
|
||||||
// if format is column, break tags to their own line/format
|
|
||||||
if c.Format == "column" && len(tags) > 0 {
|
|
||||||
lines := []string{}
|
|
||||||
for _, columnName := range columnNames {
|
|
||||||
lines = append(lines, strings.Repeat("-", len(columnName)))
|
|
||||||
}
|
|
||||||
rows = append(rows, strings.Join(lines, separator))
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, v := range row.Values {
|
|
||||||
var values []string
|
|
||||||
if c.Format == "csv" {
|
|
||||||
if row.Name != "" {
|
|
||||||
values = append(values, row.Name)
|
|
||||||
}
|
|
||||||
if len(tags) > 0 {
|
|
||||||
values = append(values, strings.Join(tags, ","))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, vv := range v {
|
|
||||||
values = append(values, interfaceToString(vv))
|
|
||||||
}
|
|
||||||
rows = append(rows, strings.Join(values, separator))
|
|
||||||
}
|
|
||||||
// Outout a line separator if in column format
|
|
||||||
if c.Format == "column" {
|
|
||||||
rows = append(rows, "")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return rows
|
|
||||||
}
|
|
||||||
|
|
||||||
// interfaceToString renders a single query value as text. nil becomes the
// empty string, every integer kind prints in base 10, and all other types
// (bool, floats, strings, ...) use fmt's default "%v" rendering.
func interfaceToString(v interface{}) string {
	switch n := v.(type) {
	case nil:
		return ""
	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr:
		return fmt.Sprintf("%d", n)
	default:
		return fmt.Sprintf("%v", n)
	}
}
|
|
||||||
|
|
||||||
func (c *CommandLine) Settings() {
|
|
||||||
w := new(tabwriter.Writer)
|
|
||||||
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
|
|
||||||
if c.Port > 0 {
|
|
||||||
fmt.Fprintf(w, "Host\t%s:%d\n", c.Host, c.Port)
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(w, "Host\t%s\n", c.Host)
|
|
||||||
}
|
|
||||||
fmt.Fprintf(w, "Username\t%s\n", c.Username)
|
|
||||||
fmt.Fprintf(w, "Database\t%s\n", c.Database)
|
|
||||||
fmt.Fprintf(w, "Pretty\t%v\n", c.Pretty)
|
|
||||||
fmt.Fprintf(w, "Format\t%s\n", c.Format)
|
|
||||||
fmt.Fprintln(w)
|
|
||||||
w.Flush()
|
|
||||||
}
|
|
||||||
|
|
||||||
// help prints the interactive shell's usage text to stdout.
func (c *CommandLine) help() {
	fmt.Println(`Usage:
        connect <host:port>   connect to another node
        auth                  prompt for username and password
        pretty                toggle pretty print
        use <db_name>         set current databases
        format <format>       set the output format: json, csv, or column
        settings              output the current settings for the shell
        exit                  quit the influx shell

        show databases        show database names
        show series           show series information
        show measurements     show measurement information
        show tag keys         show tag key information
        show tag values       show tag value information

        a full list of influxql commands can be found at:
        https://influxdb.com/docs/v0.9/query_language/spec.html
`)
}
|
|
||||||
|
|
||||||
func (c *CommandLine) gopher() {
|
|
||||||
fmt.Println(`
|
|
||||||
.-::-::://:-::- .:/++/'
|
|
||||||
'://:-''/oo+//++o+/.://o- ./+:
|
|
||||||
.:-. '++- .o/ '+yydhy' o-
|
|
||||||
.:/. .h: :osoys .smMN- :/
|
|
||||||
-/:.' s- /MMMymh. '/y/ s'
|
|
||||||
-+s:'''' d -mMMms// '-/o:
|
|
||||||
-/++/++/////:. o: '... s- :s.
|
|
||||||
:+-+s-' ':/' 's- /+ 'o:
|
|
||||||
'+-'o: /ydhsh. '//. '-o- o-
|
|
||||||
.y. o: .MMMdm+y ':+++:::/+:.' s:
|
|
||||||
.-h/ y- 'sdmds'h -+ydds:::-.' 'h.
|
|
||||||
.//-.d' o: '.' 'dsNMMMNh:.:++' :y
|
|
||||||
+y. 'd 's. .s:mddds: ++ o/
|
|
||||||
'N- odd 'o/. './o-s-' .---+++' o-
|
|
||||||
'N' yNd .://:/:::::. -s -+/s/./s' 'o/'
|
|
||||||
so' .h '''' ////s: '+. .s +y'
|
|
||||||
os/-.y' 's' 'y::+ +d'
|
|
||||||
'.:o/ -+:-:.' so.---.'
|
|
||||||
o' 'd-.''/s'
|
|
||||||
.s' :y.''.y
|
|
||||||
-s mo:::'
|
|
||||||
:: yh
|
|
||||||
// '''' /M'
|
|
||||||
o+ .s///:/. 'N:
|
|
||||||
:+ /: -s' ho
|
|
||||||
's- -/s/:+/.+h' +h
|
|
||||||
ys' ':' '-. -d
|
|
||||||
oh .h
|
|
||||||
/o .s
|
|
||||||
s. .h
|
|
||||||
-y .d
|
|
||||||
m/ -h
|
|
||||||
+d /o
|
|
||||||
'N- y:
|
|
||||||
h: m.
|
|
||||||
s- -d
|
|
||||||
o- s+
|
|
||||||
+- 'm'
|
|
||||||
s/ oo--.
|
|
||||||
y- /s ':+'
|
|
||||||
s' 'od--' .d:
|
|
||||||
-+ ':o: ':+-/+
|
|
||||||
y- .:+- '
|
|
||||||
//o- '.:+/.
|
|
||||||
.-:+/' ''-/+/.
|
|
||||||
./:' ''.:o+/-'
|
|
||||||
.+o:/:/+-' ''.-+ooo/-'
|
|
||||||
o: -h///++////-.
|
|
||||||
/: .o/
|
|
||||||
//+ 'y
|
|
||||||
./sooy.
|
|
||||||
|
|
||||||
`)
|
|
||||||
}
|
|
|
@ -1,194 +0,0 @@
|
||||||
package main_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"net/url"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/client"
|
|
||||||
main "github.com/influxdb/influxdb/cmd/influx"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestParseCommand_CommandsExist verifies that every known shell command
// (and the empty string) is accepted by ParseCommand.
func TestParseCommand_CommandsExist(t *testing.T) {
	t.Parallel()
	c := main.CommandLine{}
	tests := []struct {
		cmd string
	}{
		{cmd: "gopher"},
		{cmd: "connect"},
		{cmd: "help"},
		{cmd: "pretty"},
		{cmd: "use"},
		{cmd: ""}, // test that a blank command just returns
	}
	for _, test := range tests {
		if !c.ParseCommand(test.cmd) {
			t.Fatalf(`Command failed for %q.`, test.cmd)
		}
	}
}
|
|
||||||
|
|
||||||
// TestParseCommand_TogglePretty verifies "pretty" flips the Pretty flag
// on each invocation (false -> true -> false).
func TestParseCommand_TogglePretty(t *testing.T) {
	t.Parallel()
	c := main.CommandLine{}
	if c.Pretty {
		t.Fatalf(`Pretty should be false.`)
	}
	c.ParseCommand("pretty")
	if !c.Pretty {
		t.Fatalf(`Pretty should be true.`)
	}
	c.ParseCommand("pretty")
	if c.Pretty {
		t.Fatalf(`Pretty should be false.`)
	}
}
|
|
||||||
|
|
||||||
// TestParseCommand_Exit verifies that "exit" (with surrounding whitespace
// and any casing) makes ParseCommand return false, signaling shutdown.
func TestParseCommand_Exit(t *testing.T) {
	t.Parallel()
	c := main.CommandLine{}
	tests := []struct {
		cmd string
	}{
		{cmd: "exit"},
		{cmd: " exit"},
		{cmd: "exit "},
		{cmd: "Exit "},
	}

	for _, test := range tests {
		if c.ParseCommand(test.cmd) {
			t.Fatalf(`Command "exit" failed for %q.`, test.cmd)
		}
	}
}
|
|
||||||
|
|
||||||
// TestParseCommand_Use verifies "use <db>" sets Database, tolerating
// whitespace, a trailing semicolon, and mixed casing of the keyword.
func TestParseCommand_Use(t *testing.T) {
	t.Parallel()
	c := main.CommandLine{}
	tests := []struct {
		cmd string
	}{
		{cmd: "use db"},
		{cmd: " use db"},
		{cmd: "use db "},
		{cmd: "use db;"},
		{cmd: "use db; "},
		{cmd: "Use db"},
	}

	for _, test := range tests {
		if !c.ParseCommand(test.cmd) {
			t.Fatalf(`Command "use" failed for %q.`, test.cmd)
		}

		if c.Database != "db" {
			t.Fatalf(`Command "use" changed database to %q. Expected db`, c.Database)
		}
	}
}
|
|
||||||
|
|
||||||
// TestParseCommand_Insert verifies INSERT statements in various spacings
// and casings are accepted. A stub HTTP server stands in for the database
// and replies 204 to every write.
func TestParseCommand_Insert(t *testing.T) {
	t.Parallel()
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var data client.Response
		w.WriteHeader(http.StatusNoContent)
		_ = json.NewEncoder(w).Encode(data)
	}))
	defer ts.Close()

	u, _ := url.Parse(ts.URL)
	config := client.Config{URL: *u}
	c, err := client.NewClient(config)
	if err != nil {
		t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
	}
	m := main.CommandLine{Client: c}

	tests := []struct {
		cmd string
	}{
		{cmd: "INSERT cpu,host=serverA,region=us-west value=1.0"},
		{cmd: " INSERT cpu,host=serverA,region=us-west value=1.0"},
		{cmd: "INSERT cpu,host=serverA,region=us-west value=1.0"},
		{cmd: "insert cpu,host=serverA,region=us-west value=1.0 "},
		{cmd: "insert"},
		{cmd: "Insert "},
		{cmd: "insert c"},
		{cmd: "insert int"},
	}

	for _, test := range tests {
		if !m.ParseCommand(test.cmd) {
			t.Fatalf(`Command "insert" failed for %q.`, test.cmd)
		}
	}
}
|
|
||||||
|
|
||||||
// TestParseCommand_InsertInto verifies INSERT INTO parsing of the
// destination: bare retention policy, ".rp", quoted names with spaces,
// and "db"."rp" pairs, checking Database and RetentionPolicy afterwards.
// A stub HTTP server stands in for the database.
func TestParseCommand_InsertInto(t *testing.T) {
	t.Parallel()
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var data client.Response
		w.WriteHeader(http.StatusNoContent)
		_ = json.NewEncoder(w).Encode(data)
	}))
	defer ts.Close()

	u, _ := url.Parse(ts.URL)
	config := client.Config{URL: *u}
	c, err := client.NewClient(config)
	if err != nil {
		t.Fatalf("unexpected error. expected %v, actual %v", nil, err)
	}
	m := main.CommandLine{Client: c}

	tests := []struct {
		cmd, db, rp string
	}{
		{
			cmd: `INSERT INTO test cpu,host=serverA,region=us-west value=1.0`,
			db:  "",
			rp:  "test",
		},
		{
			cmd: ` INSERT INTO .test cpu,host=serverA,region=us-west value=1.0`,
			db:  "",
			rp:  "test",
		},
		{
			cmd: `INSERT INTO "test test" cpu,host=serverA,region=us-west value=1.0`,
			db:  "",
			rp:  "test test",
		},
		{
			cmd: `Insert iNTO test.test cpu,host=serverA,region=us-west value=1.0`,
			db:  "test",
			rp:  "test",
		},
		{
			cmd: `insert into "test test" cpu,host=serverA,region=us-west value=1.0`,
			db:  "test",
			rp:  "test test",
		},
		{
			cmd: `insert into "d b"."test test" cpu,host=serverA,region=us-west value=1.0`,
			db:  "d b",
			rp:  "test test",
		},
	}

	for _, test := range tests {
		if !m.ParseCommand(test.cmd) {
			t.Fatalf(`Command "insert into" failed for %q.`, test.cmd)
		}
		if m.Database != test.db {
			t.Fatalf(`Command "insert into" db parsing failed, expected: %q, actual: %q`, test.db, m.Database)
		}
		if m.RetentionPolicy != test.rp {
			t.Fatalf(`Command "insert into" rp parsing failed, expected: %q, actual: %q`, test.rp, m.RetentionPolicy)
		}
	}
}
|
|
154
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/influx_stress.go
generated
vendored
154
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/influx_stress.go
generated
vendored
|
@ -1,154 +0,0 @@
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"math/rand"
|
|
||||||
"net/url"
|
|
||||||
"runtime"
|
|
||||||
"sort"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/client"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Command-line flags controlling the synthetic write workload.
var (
	batchSize   = flag.Int("batchsize", 5000, "number of points per batch")
	seriesCount = flag.Int("series", 100000, "number of unique series to create")
	pointCount  = flag.Int("points", 100, "number of points per series to create")
	concurrency = flag.Int("concurrency", 10, "number of simultaneous writes to run")
	// NOTE(review): batchInterval is declared but not referenced in main below.
	batchInterval = flag.Duration("batchinterval", 0*time.Second, "duration between batches")
	database      = flag.String("database", "stress", "name of database")
	address       = flag.String("addr", "localhost:8086", "IP address and port of database (e.g., localhost:8086)")
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
flag.Parse()
|
|
||||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
|
||||||
|
|
||||||
startTime := time.Now()
|
|
||||||
counter := NewConcurrencyLimiter(*concurrency)
|
|
||||||
|
|
||||||
u, _ := url.Parse(fmt.Sprintf("http://%s", *address))
|
|
||||||
c, err := client.NewClient(client.Config{URL: *u})
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var mu sync.Mutex
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
responseTimes := make([]int, 0)
|
|
||||||
|
|
||||||
totalPoints := 0
|
|
||||||
|
|
||||||
batch := &client.BatchPoints{
|
|
||||||
Database: *database,
|
|
||||||
WriteConsistency: "any",
|
|
||||||
Time: time.Now(),
|
|
||||||
Precision: "n",
|
|
||||||
}
|
|
||||||
for i := 1; i <= *pointCount; i++ {
|
|
||||||
for j := 1; j <= *seriesCount; j++ {
|
|
||||||
p := client.Point{
|
|
||||||
Measurement: "cpu",
|
|
||||||
Tags: map[string]string{"region": "uswest", "host": fmt.Sprintf("host-%d", j)},
|
|
||||||
Fields: map[string]interface{}{"value": rand.Float64()},
|
|
||||||
}
|
|
||||||
batch.Points = append(batch.Points, p)
|
|
||||||
if len(batch.Points) >= *batchSize {
|
|
||||||
wg.Add(1)
|
|
||||||
counter.Increment()
|
|
||||||
totalPoints += len(batch.Points)
|
|
||||||
go func(b *client.BatchPoints, total int) {
|
|
||||||
st := time.Now()
|
|
||||||
if _, err := c.Write(*b); err != nil {
|
|
||||||
fmt.Println("ERROR: ", err.Error())
|
|
||||||
} else {
|
|
||||||
mu.Lock()
|
|
||||||
responseTimes = append(responseTimes, int(time.Since(st).Nanoseconds()))
|
|
||||||
mu.Unlock()
|
|
||||||
}
|
|
||||||
wg.Done()
|
|
||||||
counter.Decrement()
|
|
||||||
if total%500000 == 0 {
|
|
||||||
fmt.Printf("%d total points. %d in %s\n", total, *batchSize, time.Since(st))
|
|
||||||
}
|
|
||||||
}(batch, totalPoints)
|
|
||||||
|
|
||||||
batch = &client.BatchPoints{
|
|
||||||
Database: *database,
|
|
||||||
WriteConsistency: "any",
|
|
||||||
Precision: "n",
|
|
||||||
Time: time.Now(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
sort.Sort(sort.Reverse(sort.IntSlice(responseTimes)))
|
|
||||||
|
|
||||||
total := int64(0)
|
|
||||||
for _, t := range responseTimes {
|
|
||||||
total += int64(t)
|
|
||||||
}
|
|
||||||
mean := total / int64(len(responseTimes))
|
|
||||||
|
|
||||||
fmt.Printf("Wrote %d points at average rate of %.0f\n", totalPoints, float64(totalPoints)/time.Since(startTime).Seconds())
|
|
||||||
fmt.Println("Average response time: ", time.Duration(mean))
|
|
||||||
fmt.Println("Slowest response times:")
|
|
||||||
for _, r := range responseTimes[:100] {
|
|
||||||
fmt.Println(time.Duration(r))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConcurrencyLimiter is a go routine safe struct that can be used to
// ensure that no more than a specified max number of goroutines are
// executing. All state is owned by the handleLimits goroutine; callers
// interact only through the inc/dec channels.
type ConcurrencyLimiter struct {
	inc   chan chan struct{} // Increment requests; the inner channel acks admission
	dec   chan struct{}      // Decrement signals, buffered to max
	max   int                // maximum admitted goroutines
	count int                // currently admitted, maintained by handleLimits only
}
|
|
||||||
|
|
||||||
// NewConcurrencyLimiter returns a configured limiter that will
|
|
||||||
// ensure that calls to Increment will block if the max is hit.
|
|
||||||
func NewConcurrencyLimiter(max int) *ConcurrencyLimiter {
|
|
||||||
c := &ConcurrencyLimiter{
|
|
||||||
inc: make(chan chan struct{}),
|
|
||||||
dec: make(chan struct{}, max),
|
|
||||||
max: max,
|
|
||||||
}
|
|
||||||
go c.handleLimits()
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// Increment will increase the count of running goroutines by 1.
// if the number is currently at the max, the call to Increment
// will block until another goroutine decrements.
func (c *ConcurrencyLimiter) Increment() {
	// Send a private ack channel and wait for handleLimits to admit us.
	r := make(chan struct{})
	c.inc <- r
	<-r
}
|
|
||||||
|
|
||||||
// Decrement will reduce the count of running goroutines by 1.
// The dec channel is buffered to max, so this does not normally block.
func (c *ConcurrencyLimiter) Decrement() {
	c.dec <- struct{}{}
}
|
|
||||||
|
|
||||||
// handleLimits runs in a goroutine to manage the count of
// running goroutines. It serializes Increment requests: while the count
// is below max the requester is admitted immediately; at capacity it
// first consumes one buffered Decrement before admitting.
func (c *ConcurrencyLimiter) handleLimits() {
	for {
		r := <-c.inc
		if c.count >= c.max {
			// At capacity: block until some goroutine has decremented.
			<-c.dec
			c.count--
		}
		c.count++
		// Unblock the Increment caller.
		r <- struct{}{}
	}
}
|
|
170
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup.go
generated
vendored
170
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup.go
generated
vendored
|
@ -1,170 +0,0 @@
|
||||||
package backup
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"net"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/services/snapshotter"
|
|
||||||
"github.com/influxdb/influxdb/snapshot"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Suffix is a suffix added to the backup while it's in-process.
|
|
||||||
const Suffix = ".pending"
|
|
||||||
|
|
||||||
// Command represents the program execution for "influxd backup".
type Command struct {
	// The logger passed to the ticker during execution.
	Logger *log.Logger

	// Standard input/output, overridden for testing.
	Stderr io.Writer
}
|
|
||||||
|
|
||||||
// NewCommand returns a new instance of Command with default settings.
|
|
||||||
func NewCommand() *Command {
|
|
||||||
return &Command{
|
|
||||||
Stderr: os.Stderr,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run executes the program: it parses flags, reads any existing local
// snapshot manifest, downloads a (possibly incremental) snapshot from the
// remote host into a temporary ".pending" file, and atomically renames it
// into place. Returns an error describing the failed step.
func (cmd *Command) Run(args ...string) error {
	// Set up logger.
	cmd.Logger = log.New(cmd.Stderr, "", log.LstdFlags)
	cmd.Logger.Printf("influxdb backup")

	// Parse command line arguments.
	host, path, err := cmd.parseFlags(args)
	if err != nil {
		return err
	}

	// Retrieve snapshot manifest from local file. A missing file is fine:
	// m stays nil and a full (non-incremental) snapshot is requested.
	// NOTE(review): download must tolerate a nil manifest — confirm.
	m, err := snapshot.ReadFileManifest(path)
	if err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("read file snapshot: %s", err)
	}

	// Determine temporary path to download to.
	tmppath := path + Suffix

	// Calculate path of next backup file.
	// This uses the path if it doesn't exist.
	// Otherwise it appends an autoincrementing number.
	path, err = cmd.nextPath(path)
	if err != nil {
		return fmt.Errorf("next path: %s", err)
	}

	// Retrieve snapshot.
	if err := cmd.download(host, m, tmppath); err != nil {
		return fmt.Errorf("download: %s", err)
	}

	// Rename temporary file to final path.
	if err := os.Rename(tmppath, path); err != nil {
		return fmt.Errorf("rename: %s", err)
	}

	// TODO: Check file integrity.

	// Notify user of completion.
	cmd.Logger.Println("backup complete")

	return nil
}
|
|
||||||
|
|
||||||
// parseFlags parses and validates the command line arguments, returning
// the snapshotter host (default "localhost:8088") and the single required
// snapshot path argument.
func (cmd *Command) parseFlags(args []string) (host string, path string, err error) {
	fs := flag.NewFlagSet("", flag.ContinueOnError)
	fs.StringVar(&host, "host", "localhost:8088", "")
	fs.SetOutput(cmd.Stderr)
	fs.Usage = cmd.printUsage
	if err := fs.Parse(args); err != nil {
		return "", "", err
	}

	// Ensure that exactly one positional arg (the path) is specified.
	if fs.NArg() == 0 {
		return "", "", errors.New("snapshot path required")
	} else if fs.NArg() != 1 {
		return "", "", errors.New("only one snapshot path allowed")
	}
	path = fs.Arg(0)

	return host, path, nil
}
|
|
||||||
|
|
||||||
// nextPath returns the next file to write to.
|
|
||||||
func (cmd *Command) nextPath(path string) (string, error) {
|
|
||||||
// Use base path if it doesn't exist.
|
|
||||||
if _, err := os.Stat(path); os.IsNotExist(err) {
|
|
||||||
return path, nil
|
|
||||||
} else if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Otherwise iterate through incremental files until one is available.
|
|
||||||
for i := 0; ; i++ {
|
|
||||||
s := fmt.Sprintf(path+".%d", i)
|
|
||||||
if _, err := os.Stat(s); os.IsNotExist(err) {
|
|
||||||
return s, nil
|
|
||||||
} else if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// download downloads a snapshot from a host to a given path: it dials the
// snapshotter service over TCP, identifies itself with the mux header
// byte, sends the current manifest m (so the server can respond
// incrementally), and streams the returned snapshot into the file at path.
func (cmd *Command) download(host string, m *snapshot.Manifest, path string) error {
	// Create local file to write to.
	f, err := os.Create(path)
	if err != nil {
		return fmt.Errorf("open temp file: %s", err)
	}
	defer f.Close()

	// Connect to snapshotter service.
	conn, err := net.Dial("tcp", host)
	if err != nil {
		return err
	}
	defer conn.Close()

	// Send snapshotter marker byte so the server-side mux routes us.
	if _, err := conn.Write([]byte{snapshotter.MuxHeader}); err != nil {
		return fmt.Errorf("write snapshot header byte: %s", err)
	}

	// Write the manifest we currently have.
	if err := json.NewEncoder(conn).Encode(m); err != nil {
		return fmt.Errorf("encode snapshot manifest: %s", err)
	}

	// Read snapshot from the connection until EOF.
	if _, err := io.Copy(f, conn); err != nil {
		return fmt.Errorf("copy snapshot to file: %s", err)
	}

	// FIXME(benbjohnson): Verify integrity of snapshot.

	return nil
}
|
|
||||||
|
|
||||||
// printUsage prints the usage message to STDERR.
|
|
||||||
func (cmd *Command) printUsage() {
|
|
||||||
fmt.Fprintf(cmd.Stderr, `usage: influxd backup [flags] PATH
|
|
||||||
|
|
||||||
backup downloads a snapshot of a data node and saves it to disk.
|
|
||||||
|
|
||||||
-host <host:port>
|
|
||||||
The host to connect to snapshot.
|
|
||||||
Defaults to 127.0.0.1:8088.
|
|
||||||
`)
|
|
||||||
}
|
|
125
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup_test.go
generated
vendored
125
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup_test.go
generated
vendored
|
@ -1,125 +0,0 @@
|
||||||
package backup_test
|
|
||||||
|
|
||||||
/*
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb"
|
|
||||||
"github.com/influxdb/influxdb/cmd/influxd"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Ensure the backup can download from the server and save to disk.
|
|
||||||
func TestBackupCommand(t *testing.T) {
|
|
||||||
// Mock the backup endpoint.
|
|
||||||
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
if r.URL.Path != "/data/snapshot" {
|
|
||||||
t.Fatalf("unexpected url path: %s", r.URL.Path)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write a simple snapshot to the buffer.
|
|
||||||
sw := influxdb.NewSnapshotWriter()
|
|
||||||
sw.Snapshot = &influxdb.Snapshot{Files: []influxdb.SnapshotFile{
|
|
||||||
{Name: "meta", Size: 5, Index: 10},
|
|
||||||
}}
|
|
||||||
sw.FileWriters["meta"] = influxdb.NopWriteToCloser(bytes.NewBufferString("55555"))
|
|
||||||
if _, err := sw.WriteTo(w); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}))
|
|
||||||
defer s.Close()
|
|
||||||
|
|
||||||
// Create a temp path and remove incremental backups at the end.
|
|
||||||
path := tempfile()
|
|
||||||
defer os.Remove(path)
|
|
||||||
defer os.Remove(path + ".0")
|
|
||||||
defer os.Remove(path + ".1")
|
|
||||||
|
|
||||||
// Execute the backup against the mock server.
|
|
||||||
for i := 0; i < 3; i++ {
|
|
||||||
if err := NewBackupCommand().Run("-host", s.URL, path); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify snapshot and two incremental snapshots were written.
|
|
||||||
if _, err := os.Stat(path); err != nil {
|
|
||||||
t.Fatalf("snapshot not found: %s", err)
|
|
||||||
} else if _, err = os.Stat(path + ".0"); err != nil {
|
|
||||||
t.Fatalf("incremental snapshot(0) not found: %s", err)
|
|
||||||
} else if _, err = os.Stat(path + ".1"); err != nil {
|
|
||||||
t.Fatalf("incremental snapshot(1) not found: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure the backup command returns an error if flags cannot be parsed.
|
|
||||||
func TestBackupCommand_ErrFlagParse(t *testing.T) {
|
|
||||||
cmd := NewBackupCommand()
|
|
||||||
if err := cmd.Run("-bad-flag"); err == nil || err.Error() != `flag provided but not defined: -bad-flag` {
|
|
||||||
t.Fatal(err)
|
|
||||||
} else if !strings.Contains(cmd.Stderr.String(), "usage") {
|
|
||||||
t.Fatal("usage message not displayed")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure the backup command returns an error if the host cannot be parsed.
|
|
||||||
func TestBackupCommand_ErrInvalidHostURL(t *testing.T) {
|
|
||||||
if err := NewBackupCommand().Run("-host", "http://%f"); err == nil || err.Error() != `parse host url: parse http://%f: hexadecimal escape in host` {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure the backup command returns an error if the output path is not specified.
|
|
||||||
func TestBackupCommand_ErrPathRequired(t *testing.T) {
|
|
||||||
if err := NewBackupCommand().Run("-host", "//localhost"); err == nil || err.Error() != `snapshot path required` {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure the backup returns an error if it cannot connect to the server.
|
|
||||||
func TestBackupCommand_ErrConnectionRefused(t *testing.T) {
|
|
||||||
// Start and immediately stop a server so we have a dead port.
|
|
||||||
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))
|
|
||||||
s.Close()
|
|
||||||
|
|
||||||
// Execute the backup command.
|
|
||||||
path := tempfile()
|
|
||||||
defer os.Remove(path)
|
|
||||||
if err := NewBackupCommand().Run("-host", s.URL, path); err == nil ||
|
|
||||||
!(strings.Contains(err.Error(), `connection refused`) || strings.Contains(err.Error(), `No connection could be made`)) {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure the backup returns any non-200 status codes.
|
|
||||||
func TestBackupCommand_ErrServerError(t *testing.T) {
|
|
||||||
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.WriteHeader(http.StatusInternalServerError)
|
|
||||||
}))
|
|
||||||
defer s.Close()
|
|
||||||
|
|
||||||
// Execute the backup command.
|
|
||||||
path := tempfile()
|
|
||||||
defer os.Remove(path)
|
|
||||||
if err := NewBackupCommand().Run("-host", s.URL, path); err == nil || err.Error() != `download: snapshot error: status=500` {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// BackupCommand is a test wrapper for main.BackupCommand.
|
|
||||||
type BackupCommand struct {
|
|
||||||
*main.BackupCommand
|
|
||||||
Stderr bytes.Buffer
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBackupCommand returns a new instance of BackupCommand.
|
|
||||||
func NewBackupCommand() *BackupCommand {
|
|
||||||
cmd := &BackupCommand{BackupCommand: main.NewBackupCommand()}
|
|
||||||
cmd.BackupCommand.Stderr = &cmd.Stderr
|
|
||||||
return cmd
|
|
||||||
}
|
|
||||||
*/
|
|
|
@ -1,46 +0,0 @@
|
||||||
package help
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Command displays help for command-line sub-commands.
type Command struct {
	// Destination for the help text; defaults to os.Stdout via NewCommand.
	Stdout io.Writer
}
|
|
||||||
|
|
||||||
// NewCommand returns a new instance of Command.
|
|
||||||
func NewCommand() *Command {
|
|
||||||
return &Command{
|
|
||||||
Stdout: os.Stdout,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run executes the command, printing the package-level usage text to
// cmd.Stdout. Arguments are accepted but ignored; it always returns nil.
func (cmd *Command) Run(args ...string) error {
	fmt.Fprintln(cmd.Stdout, strings.TrimSpace(usage))
	return nil
}
|
|
||||||
|
|
||||||
const usage = `
|
|
||||||
Configure and start an InfluxDB server.
|
|
||||||
|
|
||||||
Usage:
|
|
||||||
|
|
||||||
influxd [[command] [arguments]]
|
|
||||||
|
|
||||||
The commands are:
|
|
||||||
|
|
||||||
backup downloads a snapshot of a data node and saves it to disk
|
|
||||||
config display the default configuration
|
|
||||||
restore uses a snapshot of a data node to rebuild a cluster
|
|
||||||
run run node with existing configuration
|
|
||||||
version displays the InfluxDB version
|
|
||||||
|
|
||||||
"run" is the default command.
|
|
||||||
|
|
||||||
Use "influxd help [command]" for more information about a command.
|
|
||||||
`
|
|
|
@ -1,200 +0,0 @@
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"math/rand"
|
|
||||||
"os"
|
|
||||||
"os/signal"
|
|
||||||
"strings"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/cmd/influxd/backup"
|
|
||||||
"github.com/influxdb/influxdb/cmd/influxd/help"
|
|
||||||
"github.com/influxdb/influxdb/cmd/influxd/restore"
|
|
||||||
"github.com/influxdb/influxdb/cmd/influxd/run"
|
|
||||||
)
|
|
||||||
|
|
||||||
// These variables are populated via the Go linker (-ldflags -X) at build
// time; init() below substitutes "unknown" when they are left empty.
var (
	version string = "0.9"
	commit  string
	branch  string
)
|
|
||||||
|
|
||||||
// init normalizes linker-populated build metadata so downstream
// consumers never see empty strings.
func init() {
	// If commit or branch are not set, make that clear.
	if commit == "" {
		commit = "unknown"
	}
	if branch == "" {
		branch = "unknown"
	}
}
|
|
||||||
|
|
||||||
// main seeds the RNG and dispatches to Main.Run with the CLI arguments,
// exiting with status 1 on error.
func main() {
	rand.Seed(time.Now().UnixNano())

	m := NewMain()
	if err := m.Run(os.Args[1:]...); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
|
|
||||||
|
|
||||||
// Main represents the program execution.
type Main struct {
	// Logger receives run-level status messages (signal handling, shutdown).
	Logger *log.Logger

	// Standard streams; fields exist so tests can redirect them.
	Stdin  io.Reader
	Stdout io.Writer
	Stderr io.Writer
}
|
|
||||||
|
|
||||||
// NewMain return a new instance of Main.
|
|
||||||
func NewMain() *Main {
|
|
||||||
return &Main{
|
|
||||||
Logger: log.New(os.Stderr, "[run] ", log.LstdFlags),
|
|
||||||
Stdin: os.Stdin,
|
|
||||||
Stdout: os.Stdout,
|
|
||||||
Stderr: os.Stderr,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run determines and runs the command specified by the CLI args.
// For the default "run" command it starts the server and then blocks:
// the first SIGINT/SIGTERM triggers a clean shutdown, and a second
// signal or a 30-second timeout forces a hard exit.
func (m *Main) Run(args ...string) error {
	name, args := ParseCommandName(args)

	// Extract name from args.
	switch name {
	case "", "run":
		cmd := run.NewCommand()

		// Tell the server the build details.
		cmd.Version = version
		cmd.Commit = commit
		cmd.Branch = branch

		if err := cmd.Run(args...); err != nil {
			return fmt.Errorf("run: %s", err)
		}

		// Buffer of 1 so a signal delivered while we are between the two
		// selects below is not lost.
		signalCh := make(chan os.Signal, 1)
		signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM)
		m.Logger.Println("Listening for signals")

		// Block until one of the signals above is received
		select {
		case <-signalCh:
			m.Logger.Println("Signal received, initializing clean shutdown...")
			// Close in a goroutine so we can keep watching signalCh below.
			go func() {
				cmd.Close()
			}()
		}

		// Block again until another signal is received, a shutdown timeout elapses,
		// or the Command is gracefully closed
		m.Logger.Println("Waiting for clean shutdown...")
		select {
		case <-signalCh:
			m.Logger.Println("second signal received, initializing hard shutdown")
		case <-time.After(time.Second * 30):
			m.Logger.Println("time limit reached, initializing hard shutdown")
		case <-cmd.Closed:
			m.Logger.Println("server shutdown completed")
		}

		// goodbye.

	case "backup":
		name := backup.NewCommand()
		if err := name.Run(args...); err != nil {
			return fmt.Errorf("backup: %s", err)
		}
	case "restore":
		name := restore.NewCommand()
		if err := name.Run(args...); err != nil {
			return fmt.Errorf("restore: %s", err)
		}
	case "config":
		if err := run.NewPrintConfigCommand().Run(args...); err != nil {
			return fmt.Errorf("config: %s", err)
		}
	case "version":
		if err := NewVersionCommand().Run(args...); err != nil {
			return fmt.Errorf("version: %s", err)
		}
	case "help":
		if err := help.NewCommand().Run(args...); err != nil {
			return fmt.Errorf("help: %s", err)
		}
	default:
		return fmt.Errorf(`unknown command "%s"`+"\n"+`Run 'influxd help' for usage`+"\n\n", name)
	}

	return nil
}
|
|
||||||
|
|
||||||
// ParseCommandName extracts the command name and args from the args list.
// Note that the caller's slice may be reordered in place when rewriting
// "help <command>" into "<command> -h".
func ParseCommandName(args []string) (string, []string) {
	var name string
	switch {
	case len(args) == 0:
		// Nothing to parse.
	case args[0] == "-h":
		// Special case -h immediately following binary name.
		name = "help"
	case !strings.HasPrefix(args[0], "-"):
		// First non-flag argument is the command name.
		name = args[0]
	}

	// If command is "help" and has an argument then rewrite args to use "-h".
	if name == "help" && len(args) > 1 {
		args[0], args[1] = args[1], "-h"
		name = args[0]
	}

	// If a named command is specified then return it with its arguments.
	if name != "" {
		return name, args[1:]
	}
	return "", args
}
|
|
||||||
|
|
||||||
// VersionCommand represents the command executed by "influxd version".
type VersionCommand struct {
	// Stdout receives the version line; Stderr receives usage text.
	Stdout io.Writer
	Stderr io.Writer
}
|
|
||||||
|
|
||||||
// NewVersionCommand return a new instance of VersionCommand.
|
|
||||||
func NewVersionCommand() *VersionCommand {
|
|
||||||
return &VersionCommand{
|
|
||||||
Stdout: os.Stdout,
|
|
||||||
Stderr: os.Stderr,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run prints the current version and commit info.
|
|
||||||
func (cmd *VersionCommand) Run(args ...string) error {
|
|
||||||
// Parse flags in case -h is specified.
|
|
||||||
fs := flag.NewFlagSet("", flag.ContinueOnError)
|
|
||||||
fs.Usage = func() { fmt.Fprintln(cmd.Stderr, strings.TrimSpace(versionUsage)) }
|
|
||||||
if err := fs.Parse(args); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Print version info.
|
|
||||||
fmt.Fprintf(cmd.Stdout, "InfluxDB v%s (git: %s %s)\n", version, branch, commit)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// versionUsage is printed when -h is passed to "influxd version".
var versionUsage = `
usage: version

	version displays the InfluxDB version, build branch and git commit hash
`
|
|
250
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore.go
generated
vendored
250
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore.go
generated
vendored
|
@ -1,250 +0,0 @@
|
||||||
package restore
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
|
|
||||||
"github.com/BurntSushi/toml"
|
|
||||||
"github.com/influxdb/influxdb/meta"
|
|
||||||
"github.com/influxdb/influxdb/snapshot"
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Command represents the program execution for "influxd restore".
type Command struct {
	// Output streams; fields exist so callers can redirect them.
	Stdout io.Writer
	Stderr io.Writer
}
|
|
||||||
|
|
||||||
// NewCommand returns a new instance of Command with default settings.
|
|
||||||
func NewCommand() *Command {
|
|
||||||
return &Command{
|
|
||||||
Stdout: os.Stdout,
|
|
||||||
Stderr: os.Stderr,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run executes the program.
|
|
||||||
func (cmd *Command) Run(args ...string) error {
|
|
||||||
config, path, err := cmd.parseFlags(args)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return cmd.Restore(config, path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cmd *Command) Restore(config *Config, path string) error {
|
|
||||||
// Remove meta and data directories.
|
|
||||||
if err := os.RemoveAll(config.Meta.Dir); err != nil {
|
|
||||||
return fmt.Errorf("remove meta dir: %s", err)
|
|
||||||
} else if err := os.RemoveAll(config.Data.Dir); err != nil {
|
|
||||||
return fmt.Errorf("remove data dir: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open snapshot file and all incremental backups.
|
|
||||||
mr, files, err := snapshot.OpenFileMultiReader(path)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("open multireader: %s", err)
|
|
||||||
}
|
|
||||||
defer closeAll(files)
|
|
||||||
|
|
||||||
// Unpack files from archive.
|
|
||||||
if err := cmd.unpack(mr, config); err != nil {
|
|
||||||
return fmt.Errorf("unpack: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Notify user of completion.
|
|
||||||
fmt.Fprintf(os.Stdout, "restore complete using %s", path)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseFlags parses and validates the command line arguments.
|
|
||||||
func (cmd *Command) parseFlags(args []string) (*Config, string, error) {
|
|
||||||
fs := flag.NewFlagSet("", flag.ContinueOnError)
|
|
||||||
configPath := fs.String("config", "", "")
|
|
||||||
fs.SetOutput(cmd.Stderr)
|
|
||||||
fs.Usage = cmd.printUsage
|
|
||||||
if err := fs.Parse(args); err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse configuration file from disk.
|
|
||||||
if *configPath == "" {
|
|
||||||
return nil, "", fmt.Errorf("config required")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse config.
|
|
||||||
config := Config{
|
|
||||||
Meta: meta.NewConfig(),
|
|
||||||
Data: tsdb.NewConfig(),
|
|
||||||
}
|
|
||||||
if _, err := toml.DecodeFile(*configPath, &config); err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Require output path.
|
|
||||||
path := fs.Arg(0)
|
|
||||||
if path == "" {
|
|
||||||
return nil, "", fmt.Errorf("snapshot path required")
|
|
||||||
}
|
|
||||||
|
|
||||||
return &config, path, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// closeAll closes every closer in a, deliberately ignoring close errors
// (cleanup is best-effort).
func closeAll(a []io.Closer) {
	for i := range a {
		_ = a[i].Close()
	}
}
|
|
||||||
|
|
||||||
// unpack expands the files in the snapshot archive into a directory.
// The entry named "meta" is routed to unpackMeta (meta store rebuild);
// every other entry is written to disk by unpackData. Entries must be
// consumed in order because mr is a sequential stream.
func (cmd *Command) unpack(mr *snapshot.MultiReader, config *Config) error {
	// Loop over files and extract.
	for {
		// Read entry header.
		sf, err := mr.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			return fmt.Errorf("next: entry=%s, err=%s", sf.Name, err)
		}

		// Log progress.
		fmt.Fprintf(os.Stdout, "unpacking: %s (%d bytes)\n", sf.Name, sf.Size)

		// Handle meta and tsdb files separately.
		switch sf.Name {
		case "meta":
			if err := cmd.unpackMeta(mr, sf, config); err != nil {
				return fmt.Errorf("meta: %s", err)
			}
		default:
			if err := cmd.unpackData(mr, sf, config); err != nil {
				return fmt.Errorf("data: %s", err)
			}
		}
	}

	return nil
}
|
|
||||||
|
|
||||||
// unpackMeta reads the metadata from the snapshot and initializes a raft
// cluster and replaces the root metadata. A temporary single-node meta
// store is opened on no-op listeners, the snapshot's metadata is
// force-applied via SetData, and the store is closed again on return.
func (cmd *Command) unpackMeta(mr *snapshot.MultiReader, sf snapshot.File, config *Config) error {
	// Read meta into buffer (exactly sf.Size bytes of the stream).
	var buf bytes.Buffer
	if _, err := io.CopyN(&buf, mr, sf.Size); err != nil {
		return fmt.Errorf("copy: %s", err)
	}

	// Unpack into metadata.
	var data meta.Data
	if err := data.UnmarshalBinary(buf.Bytes()); err != nil {
		return fmt.Errorf("unmarshal: %s", err)
	}

	// Copy meta config and remove peers so it starts in single mode.
	// NOTE(review): config.Meta is a *meta.Config (see Config below), so
	// c aliases rather than copies it — the nil Peers is also visible
	// through config.Meta. Confirm this mutation is intended.
	c := config.Meta
	c.Peers = nil

	// Initialize meta store with listeners that never accept connections.
	store := meta.NewStore(config.Meta)
	store.RaftListener = newNopListener()
	store.ExecListener = newNopListener()

	// Determine advertised address.
	_, port, err := net.SplitHostPort(config.Meta.BindAddress)
	if err != nil {
		return fmt.Errorf("split bind address: %s", err)
	}
	hostport := net.JoinHostPort(config.Meta.Hostname, port)

	// Resolve address.
	addr, err := net.ResolveTCPAddr("tcp", hostport)
	if err != nil {
		return fmt.Errorf("resolve tcp: addr=%s, err=%s", hostport, err)
	}
	store.Addr = addr

	// Open the meta store.
	if err := store.Open(); err != nil {
		return fmt.Errorf("open store: %s", err)
	}
	defer store.Close()

	// Wait for the store to be ready or error.
	select {
	case <-store.Ready():
	case err := <-store.Err():
		return err
	}

	// Force set the full metadata.
	if err := store.SetData(&data); err != nil {
		return fmt.Errorf("set data: %s", err)
	}

	return nil
}
|
|
||||||
|
|
||||||
func (cmd *Command) unpackData(mr *snapshot.MultiReader, sf snapshot.File, config *Config) error {
|
|
||||||
path := filepath.Join(config.Data.Dir, sf.Name)
|
|
||||||
// Create parent directory for output file.
|
|
||||||
if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil {
|
|
||||||
return fmt.Errorf("mkdir: entry=%s, err=%s", sf.Name, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create output file.
|
|
||||||
f, err := os.Create(path)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("create: entry=%s, err=%s", sf.Name, err)
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
// Copy contents from reader.
|
|
||||||
if _, err := io.CopyN(f, mr, sf.Size); err != nil {
|
|
||||||
return fmt.Errorf("copy: entry=%s, err=%s", sf.Name, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// printUsage prints the usage message to STDERR.
func (cmd *Command) printUsage() {
	fmt.Fprintf(cmd.Stderr, `usage: influxd restore [flags] PATH

restore uses a snapshot of a data node to rebuild a cluster.

        -config <path>
                Set the path to the configuration file.
`)
}
|
|
||||||
|
|
||||||
// Config represents a partial config for rebuilding the server.
type Config struct {
	// Meta is a pointer, so mutations made during restore are visible to
	// the caller's copy of the config.
	Meta *meta.Config `toml:"meta"`
	Data tsdb.Config  `toml:"data"`
}
|
|
||||||
|
|
||||||
// nopListener is a net.Listener that never yields connections: Accept
// blocks until the listener is closed and then reports an error.
type nopListener struct {
	closing chan struct{}
}

// newNopListener returns a ready-to-use nopListener.
func newNopListener() *nopListener {
	return &nopListener{closing: make(chan struct{})}
}

// Accept blocks until Close is called, then returns an error.
func (ln *nopListener) Accept() (net.Conn, error) {
	<-ln.closing
	return nil, errors.New("listener closing")
}

// Close unblocks all pending Accept calls. It must only be called once.
func (ln *nopListener) Close() error {
	close(ln.closing)
	return nil
}

// Addr reports no address.
func (ln *nopListener) Addr() net.Addr { return nil }
|
|
155
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore_test.go
generated
vendored
155
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore_test.go
generated
vendored
|
@ -1,155 +0,0 @@
|
||||||
package restore_test
|
|
||||||
|
|
||||||
/*
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"reflect"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
main "github.com/influxdb/influxdb/cmd/influxd"
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
func newConfig(path string, port int) main.Config {
|
|
||||||
config := main.NewConfig()
|
|
||||||
config.Port = port
|
|
||||||
config.Broker.Enabled = true
|
|
||||||
config.Broker.Dir = filepath.Join(path, "broker")
|
|
||||||
|
|
||||||
config.Data.Enabled = true
|
|
||||||
config.Data.Dir = filepath.Join(path, "data")
|
|
||||||
return *config
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure the restore command can expand a snapshot and bootstrap a broker.
|
|
||||||
func TestRestoreCommand(t *testing.T) {
|
|
||||||
if testing.Short() {
|
|
||||||
t.Skip("skipping TestRestoreCommand")
|
|
||||||
}
|
|
||||||
|
|
||||||
now := time.Now()
|
|
||||||
|
|
||||||
// Create root path to server.
|
|
||||||
path := tempfile()
|
|
||||||
defer os.Remove(path)
|
|
||||||
|
|
||||||
// Parse configuration.
|
|
||||||
config := newConfig(path, 8900)
|
|
||||||
|
|
||||||
// Start server.
|
|
||||||
cmd := main.NewRunCommand()
|
|
||||||
node := cmd.Open(&config, "")
|
|
||||||
if node.Broker == nil {
|
|
||||||
t.Fatal("cannot run broker")
|
|
||||||
} else if node.DataNode == nil {
|
|
||||||
t.Fatal("cannot run server")
|
|
||||||
}
|
|
||||||
b := node.Broker
|
|
||||||
s := node.DataNode
|
|
||||||
|
|
||||||
// Create data.
|
|
||||||
if err := s.CreateDatabase("db"); err != nil {
|
|
||||||
t.Fatalf("cannot create database: %s", err)
|
|
||||||
}
|
|
||||||
if index, err := s.WriteSeries("db", "default", []tsdb.Point{tsdb.NewPoint("cpu", nil, map[string]interface{}{"value": float64(100)}, now)}); err != nil {
|
|
||||||
t.Fatalf("cannot write series: %s", err)
|
|
||||||
} else if err = s.Sync(1, index); err != nil {
|
|
||||||
t.Fatalf("shard sync: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create snapshot writer.
|
|
||||||
sw, err := s.CreateSnapshotWriter()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("create snapshot writer: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Snapshot to file.
|
|
||||||
sspath := tempfile()
|
|
||||||
f, err := os.Create(sspath)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
sw.WriteTo(f)
|
|
||||||
f.Close()
|
|
||||||
|
|
||||||
// Stop server.
|
|
||||||
node.Close()
|
|
||||||
|
|
||||||
// Remove data & broker directories.
|
|
||||||
if err := os.RemoveAll(path); err != nil {
|
|
||||||
t.Fatalf("remove: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Execute the restore.
|
|
||||||
if err := NewRestoreCommand().Restore(&config, sspath); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rewrite config to a new port and re-parse.
|
|
||||||
config = newConfig(path, 8910)
|
|
||||||
|
|
||||||
// Restart server.
|
|
||||||
cmd = main.NewRunCommand()
|
|
||||||
node = cmd.Open(&config, "")
|
|
||||||
if b == nil {
|
|
||||||
t.Fatal("cannot run broker")
|
|
||||||
} else if s == nil {
|
|
||||||
t.Fatal("cannot run server")
|
|
||||||
}
|
|
||||||
b = node.Broker
|
|
||||||
s = node.DataNode
|
|
||||||
|
|
||||||
// Write new data.
|
|
||||||
if err := s.CreateDatabase("newdb"); err != nil {
|
|
||||||
t.Fatalf("cannot create new database: %s", err)
|
|
||||||
}
|
|
||||||
if index, err := s.WriteSeries("newdb", "default", []tsdb.Point{tsdb.NewPoint("mem", nil, map[string]interface{}{"value": float64(1000)}, now)}); err != nil {
|
|
||||||
t.Fatalf("cannot write new series: %s", err)
|
|
||||||
} else if err = s.Sync(2, index); err != nil {
|
|
||||||
t.Fatalf("shard sync: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read series data.
|
|
||||||
if v, err := s.ReadSeries("db", "default", "cpu", nil, now); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
} else if !reflect.DeepEqual(v, map[string]interface{}{"value": float64(100)}) {
|
|
||||||
t.Fatalf("read series(0) mismatch: %#v", v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read new series data.
|
|
||||||
if v, err := s.ReadSeries("newdb", "default", "mem", nil, now); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
} else if !reflect.DeepEqual(v, map[string]interface{}{"value": float64(1000)}) {
|
|
||||||
t.Fatalf("read series(1) mismatch: %#v", v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stop server.
|
|
||||||
node.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// RestoreCommand is a test wrapper for main.RestoreCommand.
|
|
||||||
type RestoreCommand struct {
|
|
||||||
*main.RestoreCommand
|
|
||||||
Stderr bytes.Buffer
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRestoreCommand returns a new instance of RestoreCommand.
|
|
||||||
func NewRestoreCommand() *RestoreCommand {
|
|
||||||
cmd := &RestoreCommand{RestoreCommand: main.NewRestoreCommand()}
|
|
||||||
cmd.RestoreCommand.Stderr = &cmd.Stderr
|
|
||||||
return cmd
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustReadFile reads data from a file. Panic on error.
|
|
||||||
func MustReadFile(filename string) []byte {
|
|
||||||
b, err := ioutil.ReadFile(filename)
|
|
||||||
if err != nil {
|
|
||||||
panic(err.Error())
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
*/
|
|
235
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/command.go
generated
vendored
235
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/command.go
generated
vendored
|
@ -1,235 +0,0 @@
|
||||||
package run
|
|
||||||
|
|
||||||
import (
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/BurntSushi/toml"
|
|
||||||
)
|
|
||||||
|
|
||||||
// logo is the ASCII-art banner printed on server startup.
const logo = `
 8888888           .d888 888                   8888888b.  888888b.
   888            d88P"  888                   888  "Y88b 888  "88b
   888            888    888                   888    888 888  .88P
   888   88888b.  888888 888 888  888 888  888 888    888 8888888K.
   888   888 "88b 888    888 888  888  Y8bd8P' 888    888 888  "Y88b
   888   888  888 888    888 888  888    X88K  888    888 888    888
   888   888  888 888    888 Y88b 888  .d8""8b. 888  .d88P 888   d88P
 8888888 888  888 888    888  "Y88888 888  888 8888888P"  8888888P"

`
|
|
||||||
|
|
||||||
// Command represents the command executed by "influxd run".
type Command struct {
	// Build details passed through to the server and start-up log line.
	Version string
	Branch  string
	Commit  string

	// closing is closed by Close to stop background monitoring;
	// Closed is closed once Close has finished shutting the server down.
	closing chan struct{}
	Closed  chan struct{}

	// Standard streams; fields exist so callers can redirect them.
	Stdin  io.Reader
	Stdout io.Writer
	Stderr io.Writer

	// Server is the running server instance, set by Run on success.
	Server *Server
}
|
|
||||||
|
|
||||||
// NewCommand return a new instance of Command.
|
|
||||||
func NewCommand() *Command {
|
|
||||||
return &Command{
|
|
||||||
closing: make(chan struct{}),
|
|
||||||
Closed: make(chan struct{}),
|
|
||||||
Stdin: os.Stdin,
|
|
||||||
Stdout: os.Stdout,
|
|
||||||
Stderr: os.Stderr,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run parses the config from args and runs the server.
// It returns once the server is open; errors from the running server are
// reported asynchronously by monitorServerErrors. Shutdown is triggered
// separately via Close.
func (cmd *Command) Run(args ...string) error {
	// Parse the command line flags.
	options, err := cmd.ParseFlags(args...)
	if err != nil {
		return err
	}

	// Print sweet InfluxDB logo.
	fmt.Print(logo)

	// Write the PID file.
	if err := cmd.writePIDFile(options.PIDFile); err != nil {
		return fmt.Errorf("write pid file: %s", err)
	}

	// Set parallelism.
	runtime.GOMAXPROCS(runtime.NumCPU())

	// Turn on block profiling to debug stuck databases.
	// NOTE(review): SetBlockProfileRate's argument is a sampling rate in
	// nanoseconds of blocking, so this samples ~1 blocking-second per event.
	runtime.SetBlockProfileRate(int(1 * time.Second))

	// Parse config
	config, err := cmd.ParseConfig(options.ConfigPath)
	if err != nil {
		return fmt.Errorf("parse config: %s", err)
	}

	// Apply any environment variables on top of the parsed config
	if err := config.ApplyEnvOverrides(); err != nil {
		return fmt.Errorf("apply env config: %v", err)
	}

	// Override config hostname if specified in the command line args.
	if options.Hostname != "" {
		config.Meta.Hostname = options.Hostname
	}

	// -join supplies a comma-separated list of peers.
	if options.Join != "" {
		config.Meta.Peers = strings.Split(options.Join, ",")
	}

	// Validate the configuration.
	if err := config.Validate(); err != nil {
		return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`.", err)
	}

	// Create server from config and start it.
	s, err := NewServer(config, cmd.Version)
	if err != nil {
		return fmt.Errorf("create server: %s", err)
	}
	s.CPUProfile = options.CPUProfile
	s.MemProfile = options.MemProfile
	if err := s.Open(); err != nil {
		return fmt.Errorf("open server: %s", err)
	}
	cmd.Server = s

	// Mark start-up in log.
	log.Printf("InfluxDB starting, version %s, branch %s, commit %s", cmd.Version, cmd.Branch, cmd.Commit)
	log.Println("GOMAXPROCS set to", runtime.GOMAXPROCS(0))

	// Begin monitoring the server's error channel.
	go cmd.monitorServerErrors()

	return nil
}
|
|
||||||
|
|
||||||
// Close shuts down the server.
|
|
||||||
func (cmd *Command) Close() error {
|
|
||||||
defer close(cmd.Closed)
|
|
||||||
close(cmd.closing)
|
|
||||||
if cmd.Server != nil {
|
|
||||||
return cmd.Server.Close()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cmd *Command) monitorServerErrors() {
|
|
||||||
logger := log.New(cmd.Stderr, "", log.LstdFlags)
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case err := <-cmd.Server.Err():
|
|
||||||
logger.Println(err)
|
|
||||||
case <-cmd.closing:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseFlags parses the command line flags from args and returns an options set.
|
|
||||||
func (cmd *Command) ParseFlags(args ...string) (Options, error) {
|
|
||||||
var options Options
|
|
||||||
fs := flag.NewFlagSet("", flag.ContinueOnError)
|
|
||||||
fs.StringVar(&options.ConfigPath, "config", "", "")
|
|
||||||
fs.StringVar(&options.PIDFile, "pidfile", "", "")
|
|
||||||
fs.StringVar(&options.Hostname, "hostname", "", "")
|
|
||||||
fs.StringVar(&options.Join, "join", "", "")
|
|
||||||
fs.StringVar(&options.CPUProfile, "cpuprofile", "", "")
|
|
||||||
fs.StringVar(&options.MemProfile, "memprofile", "", "")
|
|
||||||
fs.Usage = func() { fmt.Fprintln(cmd.Stderr, usage) }
|
|
||||||
if err := fs.Parse(args); err != nil {
|
|
||||||
return Options{}, err
|
|
||||||
}
|
|
||||||
return options, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// writePIDFile writes the process ID to path.
|
|
||||||
func (cmd *Command) writePIDFile(path string) error {
|
|
||||||
// Ignore if path is not set.
|
|
||||||
if path == "" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure the required directory structure exists.
|
|
||||||
err := os.MkdirAll(filepath.Dir(path), 0777)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("mkdir: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Retrieve the PID and write it.
|
|
||||||
pid := strconv.Itoa(os.Getpid())
|
|
||||||
if err := ioutil.WriteFile(path, []byte(pid), 0666); err != nil {
|
|
||||||
return fmt.Errorf("write file: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseConfig parses the config at path.
|
|
||||||
// Returns a demo configuration if path is blank.
|
|
||||||
func (cmd *Command) ParseConfig(path string) (*Config, error) {
|
|
||||||
// Use demo configuration if no config path is specified.
|
|
||||||
if path == "" {
|
|
||||||
fmt.Fprintln(cmd.Stdout, "no configuration provided, using default settings")
|
|
||||||
return NewDemoConfig()
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Fprintf(cmd.Stdout, "Using configuration at: %s\n", path)
|
|
||||||
|
|
||||||
config := NewConfig()
|
|
||||||
if _, err := toml.DecodeFile(path, &config); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return config, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// usage is the help text for "influxd run".
var usage = `usage: run [flags]

run starts the broker and data node server. If this is the first time running
the command then a new cluster will be initialized unless the -join argument
is used.

        -config <path>
                Set the path to the configuration file.

        -hostname <name>
                Override the hostname, the 'hostname' configuration
                option will be overridden.

        -join <url>
                Joins the server to an existing cluster.

        -pidfile <path>
                Write process ID to a file.
`
|
|
||||||
|
|
||||||
// Options represents the command line options that can be parsed.
type Options struct {
	ConfigPath string // -config
	PIDFile    string // -pidfile
	Hostname   string // -hostname
	Join       string // -join (comma-separated peer list)
	CPUProfile string // -cpuprofile
	MemProfile string // -memprofile
}
|
|
|
@ -1,227 +0,0 @@
|
||||||
package run
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"os/user"
|
|
||||||
"path/filepath"
|
|
||||||
"reflect"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/cluster"
|
|
||||||
"github.com/influxdb/influxdb/meta"
|
|
||||||
"github.com/influxdb/influxdb/services/admin"
|
|
||||||
"github.com/influxdb/influxdb/services/collectd"
|
|
||||||
"github.com/influxdb/influxdb/services/continuous_querier"
|
|
||||||
"github.com/influxdb/influxdb/services/graphite"
|
|
||||||
"github.com/influxdb/influxdb/services/hh"
|
|
||||||
"github.com/influxdb/influxdb/services/httpd"
|
|
||||||
"github.com/influxdb/influxdb/services/monitor"
|
|
||||||
"github.com/influxdb/influxdb/services/opentsdb"
|
|
||||||
"github.com/influxdb/influxdb/services/precreator"
|
|
||||||
"github.com/influxdb/influxdb/services/retention"
|
|
||||||
"github.com/influxdb/influxdb/services/udp"
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Config represents the configuration format for the influxd binary.
type Config struct {
	// Core storage, clustering, and retention settings.
	Meta       *meta.Config      `toml:"meta"`
	Data       tsdb.Config       `toml:"data"`
	Cluster    cluster.Config    `toml:"cluster"`
	Retention  retention.Config  `toml:"retention"`
	Precreator precreator.Config `toml:"shard-precreation"`

	// Input and administration services; Graphites and UDPs allow
	// multiple listener instances.
	Admin     admin.Config      `toml:"admin"`
	HTTPD     httpd.Config      `toml:"http"`
	Graphites []graphite.Config `toml:"graphite"`
	Collectd  collectd.Config   `toml:"collectd"`
	OpenTSDB  opentsdb.Config   `toml:"opentsdb"`
	UDPs      []udp.Config      `toml:"udp"`

	// Snapshot SnapshotConfig `toml:"snapshot"`
	Monitoring      monitor.Config            `toml:"monitoring"`
	ContinuousQuery continuous_querier.Config `toml:"continuous_queries"`

	HintedHandoff hh.Config `toml:"hinted-handoff"`

	// Server reporting
	ReportingDisabled bool `toml:"reporting-disabled"`
}
|
|
||||||
|
|
||||||
// NewConfig returns an instance of Config with reasonable defaults.
|
|
||||||
func NewConfig() *Config {
|
|
||||||
c := &Config{}
|
|
||||||
c.Meta = meta.NewConfig()
|
|
||||||
c.Data = tsdb.NewConfig()
|
|
||||||
c.Cluster = cluster.NewConfig()
|
|
||||||
c.Precreator = precreator.NewConfig()
|
|
||||||
|
|
||||||
c.Admin = admin.NewConfig()
|
|
||||||
c.HTTPD = httpd.NewConfig()
|
|
||||||
c.Collectd = collectd.NewConfig()
|
|
||||||
c.OpenTSDB = opentsdb.NewConfig()
|
|
||||||
c.Graphites = append(c.Graphites, graphite.NewConfig())
|
|
||||||
|
|
||||||
c.Monitoring = monitor.NewConfig()
|
|
||||||
c.ContinuousQuery = continuous_querier.NewConfig()
|
|
||||||
c.Retention = retention.NewConfig()
|
|
||||||
c.HintedHandoff = hh.NewConfig()
|
|
||||||
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDemoConfig returns the config that runs when no config is specified.
|
|
||||||
func NewDemoConfig() (*Config, error) {
|
|
||||||
c := NewConfig()
|
|
||||||
|
|
||||||
var homeDir string
|
|
||||||
// By default, store meta and data files in current users home directory
|
|
||||||
u, err := user.Current()
|
|
||||||
if err == nil {
|
|
||||||
homeDir = u.HomeDir
|
|
||||||
} else if os.Getenv("HOME") != "" {
|
|
||||||
homeDir = os.Getenv("HOME")
|
|
||||||
} else {
|
|
||||||
return nil, fmt.Errorf("failed to determine current user for storage")
|
|
||||||
}
|
|
||||||
|
|
||||||
c.Meta.Dir = filepath.Join(homeDir, ".influxdb/meta")
|
|
||||||
c.Data.Dir = filepath.Join(homeDir, ".influxdb/data")
|
|
||||||
c.HintedHandoff.Dir = filepath.Join(homeDir, ".influxdb/hh")
|
|
||||||
c.Data.WALDir = filepath.Join(homeDir, ".influxdb/wal")
|
|
||||||
|
|
||||||
c.Admin.Enabled = true
|
|
||||||
c.Monitoring.Enabled = false
|
|
||||||
|
|
||||||
return c, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate returns an error if the config is invalid.
|
|
||||||
func (c *Config) Validate() error {
|
|
||||||
if c.Meta.Dir == "" {
|
|
||||||
return errors.New("Meta.Dir must be specified")
|
|
||||||
} else if c.Data.Dir == "" {
|
|
||||||
return errors.New("Data.Dir must be specified")
|
|
||||||
} else if c.HintedHandoff.Dir == "" {
|
|
||||||
return errors.New("HintedHandoff.Dir must be specified")
|
|
||||||
} else if c.Data.WALDir == "" {
|
|
||||||
return errors.New("Data.WALDir must be specified")
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, g := range c.Graphites {
|
|
||||||
if err := g.Validate(); err != nil {
|
|
||||||
return fmt.Errorf("invalid graphite config: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Config) ApplyEnvOverrides() error {
|
|
||||||
return c.applyEnvOverrides("INFLUXDB", reflect.ValueOf(c))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Config) applyEnvOverrides(prefix string, spec reflect.Value) error {
|
|
||||||
// If we have a pointer, dereference it
|
|
||||||
s := spec
|
|
||||||
if spec.Kind() == reflect.Ptr {
|
|
||||||
s = spec.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make sure we have struct
|
|
||||||
if s.Kind() != reflect.Struct {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
typeOfSpec := s.Type()
|
|
||||||
for i := 0; i < s.NumField(); i++ {
|
|
||||||
f := s.Field(i)
|
|
||||||
// Get the toml tag to determine what env var name to use
|
|
||||||
configName := typeOfSpec.Field(i).Tag.Get("toml")
|
|
||||||
// Replace hyphens with underscores to avoid issues with shells
|
|
||||||
configName = strings.Replace(configName, "-", "_", -1)
|
|
||||||
fieldName := typeOfSpec.Field(i).Name
|
|
||||||
|
|
||||||
// Skip any fields that we cannot set
|
|
||||||
if f.CanSet() || f.Kind() == reflect.Slice {
|
|
||||||
|
|
||||||
// Use the upper-case prefix and toml name for the env var
|
|
||||||
key := strings.ToUpper(configName)
|
|
||||||
if prefix != "" {
|
|
||||||
key = strings.ToUpper(fmt.Sprintf("%s_%s", prefix, configName))
|
|
||||||
}
|
|
||||||
value := os.Getenv(key)
|
|
||||||
|
|
||||||
// If the type is s slice, apply to each using the index as a suffix
|
|
||||||
// e.g. GRAPHITE_0
|
|
||||||
if f.Kind() == reflect.Slice || f.Kind() == reflect.Array {
|
|
||||||
for i := 0; i < f.Len(); i++ {
|
|
||||||
if err := c.applyEnvOverrides(fmt.Sprintf("%s_%d", key, i), f.Index(i)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// If it's a sub-config, recursively apply
|
|
||||||
if f.Kind() == reflect.Struct || f.Kind() == reflect.Ptr {
|
|
||||||
if err := c.applyEnvOverrides(key, f); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Skip any fields we don't have a value to set
|
|
||||||
if value == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
switch f.Kind() {
|
|
||||||
case reflect.String:
|
|
||||||
f.SetString(value)
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
|
|
||||||
var intValue int64
|
|
||||||
|
|
||||||
// Handle toml.Duration
|
|
||||||
if f.Type().Name() == "Duration" {
|
|
||||||
dur, err := time.ParseDuration(value)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldName, f.Type().String(), value)
|
|
||||||
}
|
|
||||||
intValue = dur.Nanoseconds()
|
|
||||||
} else {
|
|
||||||
var err error
|
|
||||||
intValue, err = strconv.ParseInt(value, 0, f.Type().Bits())
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldName, f.Type().String(), value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
f.SetInt(intValue)
|
|
||||||
case reflect.Bool:
|
|
||||||
boolValue, err := strconv.ParseBool(value)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldName, f.Type().String(), value)
|
|
||||||
|
|
||||||
}
|
|
||||||
f.SetBool(boolValue)
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
floatValue, err := strconv.ParseFloat(value, f.Type().Bits())
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldName, f.Type().String(), value)
|
|
||||||
|
|
||||||
}
|
|
||||||
f.SetFloat(floatValue)
|
|
||||||
default:
|
|
||||||
if err := c.applyEnvOverrides(key, f); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
73
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_command.go
generated
vendored
73
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_command.go
generated
vendored
|
@ -1,73 +0,0 @@
|
||||||
package run
|
|
||||||
|
|
||||||
import (
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/BurntSushi/toml"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PrintConfigCommand represents the command executed by "influxd config".
|
|
||||||
type PrintConfigCommand struct {
|
|
||||||
Stdin io.Reader
|
|
||||||
Stdout io.Writer
|
|
||||||
Stderr io.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewPrintConfigCommand return a new instance of PrintConfigCommand.
|
|
||||||
func NewPrintConfigCommand() *PrintConfigCommand {
|
|
||||||
return &PrintConfigCommand{
|
|
||||||
Stdin: os.Stdin,
|
|
||||||
Stdout: os.Stdout,
|
|
||||||
Stderr: os.Stderr,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run parses and prints the current config loaded.
|
|
||||||
func (cmd *PrintConfigCommand) Run(args ...string) error {
|
|
||||||
// Parse command flags.
|
|
||||||
fs := flag.NewFlagSet("", flag.ContinueOnError)
|
|
||||||
configPath := fs.String("config", "", "")
|
|
||||||
hostname := fs.String("hostname", "", "")
|
|
||||||
fs.Usage = func() { fmt.Fprintln(cmd.Stderr, printConfigUsage) }
|
|
||||||
if err := fs.Parse(args); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse config from path.
|
|
||||||
config, err := cmd.parseConfig(*configPath)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("parse config: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Override config properties.
|
|
||||||
if *hostname != "" {
|
|
||||||
config.Meta.Hostname = *hostname
|
|
||||||
}
|
|
||||||
|
|
||||||
toml.NewEncoder(cmd.Stdout).Encode(config)
|
|
||||||
fmt.Fprint(cmd.Stdout, "\n")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseConfig parses the config at path.
|
|
||||||
// Returns a demo configuration if path is blank.
|
|
||||||
func (cmd *PrintConfigCommand) parseConfig(path string) (*Config, error) {
|
|
||||||
if path == "" {
|
|
||||||
return NewDemoConfig()
|
|
||||||
}
|
|
||||||
|
|
||||||
config := NewConfig()
|
|
||||||
if _, err := toml.DecodeFile(path, &config); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return config, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var printConfigUsage = `usage: config
|
|
||||||
|
|
||||||
config displays the default configuration
|
|
||||||
`
|
|
144
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_test.go
generated
vendored
144
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_test.go
generated
vendored
|
@ -1,144 +0,0 @@
|
||||||
package run_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/BurntSushi/toml"
|
|
||||||
"github.com/influxdb/influxdb/cmd/influxd/run"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Ensure the configuration can be parsed.
|
|
||||||
func TestConfig_Parse(t *testing.T) {
|
|
||||||
// Parse configuration.
|
|
||||||
var c run.Config
|
|
||||||
if _, err := toml.Decode(`
|
|
||||||
[meta]
|
|
||||||
dir = "/tmp/meta"
|
|
||||||
|
|
||||||
[data]
|
|
||||||
dir = "/tmp/data"
|
|
||||||
|
|
||||||
[cluster]
|
|
||||||
|
|
||||||
[admin]
|
|
||||||
bind-address = ":8083"
|
|
||||||
|
|
||||||
[http]
|
|
||||||
bind-address = ":8087"
|
|
||||||
|
|
||||||
[[graphite]]
|
|
||||||
protocol = "udp"
|
|
||||||
|
|
||||||
[[graphite]]
|
|
||||||
protocol = "tcp"
|
|
||||||
|
|
||||||
[collectd]
|
|
||||||
bind-address = ":1000"
|
|
||||||
|
|
||||||
[opentsdb]
|
|
||||||
bind-address = ":2000"
|
|
||||||
|
|
||||||
[[udp]]
|
|
||||||
bind-address = ":4444"
|
|
||||||
|
|
||||||
[monitoring]
|
|
||||||
enabled = true
|
|
||||||
|
|
||||||
[continuous_queries]
|
|
||||||
enabled = true
|
|
||||||
`, &c); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate configuration.
|
|
||||||
if c.Meta.Dir != "/tmp/meta" {
|
|
||||||
t.Fatalf("unexpected meta dir: %s", c.Meta.Dir)
|
|
||||||
} else if c.Data.Dir != "/tmp/data" {
|
|
||||||
t.Fatalf("unexpected data dir: %s", c.Data.Dir)
|
|
||||||
} else if c.Admin.BindAddress != ":8083" {
|
|
||||||
t.Fatalf("unexpected admin bind address: %s", c.Admin.BindAddress)
|
|
||||||
} else if c.HTTPD.BindAddress != ":8087" {
|
|
||||||
t.Fatalf("unexpected api bind address: %s", c.HTTPD.BindAddress)
|
|
||||||
} else if len(c.Graphites) != 2 {
|
|
||||||
t.Fatalf("unexpected graphites count: %d", len(c.Graphites))
|
|
||||||
} else if c.Graphites[0].Protocol != "udp" {
|
|
||||||
t.Fatalf("unexpected graphite protocol(0): %s", c.Graphites[0].Protocol)
|
|
||||||
} else if c.Graphites[1].Protocol != "tcp" {
|
|
||||||
t.Fatalf("unexpected graphite protocol(1): %s", c.Graphites[1].Protocol)
|
|
||||||
} else if c.Collectd.BindAddress != ":1000" {
|
|
||||||
t.Fatalf("unexpected collectd bind address: %s", c.Collectd.BindAddress)
|
|
||||||
} else if c.OpenTSDB.BindAddress != ":2000" {
|
|
||||||
t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDB.BindAddress)
|
|
||||||
} else if c.UDPs[0].BindAddress != ":4444" {
|
|
||||||
t.Fatalf("unexpected udp bind address: %s", c.UDPs[0].BindAddress)
|
|
||||||
} else if c.Monitoring.Enabled != true {
|
|
||||||
t.Fatalf("unexpected monitoring enabled: %v", c.Monitoring.Enabled)
|
|
||||||
} else if c.ContinuousQuery.Enabled != true {
|
|
||||||
t.Fatalf("unexpected continuous query enabled: %v", c.ContinuousQuery.Enabled)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure the configuration can be parsed.
|
|
||||||
func TestConfig_Parse_EnvOverride(t *testing.T) {
|
|
||||||
// Parse configuration.
|
|
||||||
var c run.Config
|
|
||||||
if _, err := toml.Decode(`
|
|
||||||
[meta]
|
|
||||||
dir = "/tmp/meta"
|
|
||||||
|
|
||||||
[data]
|
|
||||||
dir = "/tmp/data"
|
|
||||||
|
|
||||||
[cluster]
|
|
||||||
|
|
||||||
[admin]
|
|
||||||
bind-address = ":8083"
|
|
||||||
|
|
||||||
[http]
|
|
||||||
bind-address = ":8087"
|
|
||||||
|
|
||||||
[[graphite]]
|
|
||||||
protocol = "udp"
|
|
||||||
|
|
||||||
[[graphite]]
|
|
||||||
protocol = "tcp"
|
|
||||||
|
|
||||||
[collectd]
|
|
||||||
bind-address = ":1000"
|
|
||||||
|
|
||||||
[opentsdb]
|
|
||||||
bind-address = ":2000"
|
|
||||||
|
|
||||||
[[udp]]
|
|
||||||
bind-address = ":4444"
|
|
||||||
|
|
||||||
[monitoring]
|
|
||||||
enabled = true
|
|
||||||
|
|
||||||
[continuous_queries]
|
|
||||||
enabled = true
|
|
||||||
`, &c); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := os.Setenv("INFLUXDB_UDP_BIND_ADDRESS", ":1234"); err != nil {
|
|
||||||
t.Fatalf("failed to set env var: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := os.Setenv("INFLUXDB_GRAPHITE_1_PROTOCOL", "udp"); err != nil {
|
|
||||||
t.Fatalf("failed to set env var: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := c.ApplyEnvOverrides(); err != nil {
|
|
||||||
t.Fatalf("failed to apply env overrides: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.UDPs[0].BindAddress != ":4444" {
|
|
||||||
t.Fatalf("unexpected udp bind address: %s", c.UDPs[0].BindAddress)
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.Graphites[1].Protocol != "udp" {
|
|
||||||
t.Fatalf("unexpected graphite protocol(0): %s", c.Graphites[0].Protocol)
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,536 +0,0 @@
|
||||||
package run
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
"runtime/pprof"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/cluster"
|
|
||||||
"github.com/influxdb/influxdb/meta"
|
|
||||||
"github.com/influxdb/influxdb/services/admin"
|
|
||||||
"github.com/influxdb/influxdb/services/collectd"
|
|
||||||
"github.com/influxdb/influxdb/services/continuous_querier"
|
|
||||||
"github.com/influxdb/influxdb/services/graphite"
|
|
||||||
"github.com/influxdb/influxdb/services/hh"
|
|
||||||
"github.com/influxdb/influxdb/services/httpd"
|
|
||||||
"github.com/influxdb/influxdb/services/opentsdb"
|
|
||||||
"github.com/influxdb/influxdb/services/precreator"
|
|
||||||
"github.com/influxdb/influxdb/services/retention"
|
|
||||||
"github.com/influxdb/influxdb/services/snapshotter"
|
|
||||||
"github.com/influxdb/influxdb/services/udp"
|
|
||||||
"github.com/influxdb/influxdb/tcp"
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
_ "github.com/influxdb/influxdb/tsdb/engine"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Server represents a container for the metadata and storage data and services.
|
|
||||||
// It is built using a Config and it manages the startup and shutdown of all
|
|
||||||
// services in the proper order.
|
|
||||||
type Server struct {
|
|
||||||
version string // Build version
|
|
||||||
|
|
||||||
err chan error
|
|
||||||
closing chan struct{}
|
|
||||||
|
|
||||||
Hostname string
|
|
||||||
BindAddress string
|
|
||||||
Listener net.Listener
|
|
||||||
|
|
||||||
MetaStore *meta.Store
|
|
||||||
TSDBStore *tsdb.Store
|
|
||||||
QueryExecutor *tsdb.QueryExecutor
|
|
||||||
PointsWriter *cluster.PointsWriter
|
|
||||||
ShardWriter *cluster.ShardWriter
|
|
||||||
ShardMapper *cluster.ShardMapper
|
|
||||||
HintedHandoff *hh.Service
|
|
||||||
|
|
||||||
Services []Service
|
|
||||||
|
|
||||||
// These references are required for the tcp muxer.
|
|
||||||
ClusterService *cluster.Service
|
|
||||||
SnapshotterService *snapshotter.Service
|
|
||||||
|
|
||||||
// Server reporting
|
|
||||||
reportingDisabled bool
|
|
||||||
|
|
||||||
// Profiling
|
|
||||||
CPUProfile string
|
|
||||||
MemProfile string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewServer returns a new instance of Server built from a config.
|
|
||||||
func NewServer(c *Config, version string) (*Server, error) {
|
|
||||||
// Construct base meta store and data store.
|
|
||||||
tsdbStore := tsdb.NewStore(c.Data.Dir)
|
|
||||||
tsdbStore.EngineOptions.Config = c.Data
|
|
||||||
|
|
||||||
s := &Server{
|
|
||||||
version: version,
|
|
||||||
err: make(chan error),
|
|
||||||
closing: make(chan struct{}),
|
|
||||||
|
|
||||||
Hostname: c.Meta.Hostname,
|
|
||||||
BindAddress: c.Meta.BindAddress,
|
|
||||||
|
|
||||||
MetaStore: meta.NewStore(c.Meta),
|
|
||||||
TSDBStore: tsdbStore,
|
|
||||||
|
|
||||||
reportingDisabled: c.ReportingDisabled,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Copy TSDB configuration.
|
|
||||||
s.TSDBStore.EngineOptions.MaxWALSize = c.Data.MaxWALSize
|
|
||||||
s.TSDBStore.EngineOptions.WALFlushInterval = time.Duration(c.Data.WALFlushInterval)
|
|
||||||
s.TSDBStore.EngineOptions.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay)
|
|
||||||
|
|
||||||
// Set the shard mapper
|
|
||||||
s.ShardMapper = cluster.NewShardMapper(time.Duration(c.Cluster.ShardMapperTimeout))
|
|
||||||
s.ShardMapper.ForceRemoteMapping = c.Cluster.ForceRemoteShardMapping
|
|
||||||
s.ShardMapper.MetaStore = s.MetaStore
|
|
||||||
s.ShardMapper.TSDBStore = s.TSDBStore
|
|
||||||
|
|
||||||
// Initialize query executor.
|
|
||||||
s.QueryExecutor = tsdb.NewQueryExecutor(s.TSDBStore)
|
|
||||||
s.QueryExecutor.MetaStore = s.MetaStore
|
|
||||||
s.QueryExecutor.MetaStatementExecutor = &meta.StatementExecutor{Store: s.MetaStore}
|
|
||||||
s.QueryExecutor.ShardMapper = s.ShardMapper
|
|
||||||
|
|
||||||
// Set the shard writer
|
|
||||||
s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout))
|
|
||||||
s.ShardWriter.MetaStore = s.MetaStore
|
|
||||||
|
|
||||||
// Create the hinted handoff service
|
|
||||||
s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter)
|
|
||||||
|
|
||||||
// Initialize points writer.
|
|
||||||
s.PointsWriter = cluster.NewPointsWriter()
|
|
||||||
s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout)
|
|
||||||
s.PointsWriter.MetaStore = s.MetaStore
|
|
||||||
s.PointsWriter.TSDBStore = s.TSDBStore
|
|
||||||
s.PointsWriter.ShardWriter = s.ShardWriter
|
|
||||||
s.PointsWriter.HintedHandoff = s.HintedHandoff
|
|
||||||
|
|
||||||
// Append services.
|
|
||||||
s.appendClusterService(c.Cluster)
|
|
||||||
s.appendPrecreatorService(c.Precreator)
|
|
||||||
s.appendSnapshotterService()
|
|
||||||
s.appendAdminService(c.Admin)
|
|
||||||
s.appendContinuousQueryService(c.ContinuousQuery)
|
|
||||||
s.appendHTTPDService(c.HTTPD)
|
|
||||||
s.appendCollectdService(c.Collectd)
|
|
||||||
if err := s.appendOpenTSDBService(c.OpenTSDB); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
for _, g := range c.UDPs {
|
|
||||||
s.appendUDPService(g)
|
|
||||||
}
|
|
||||||
s.appendRetentionPolicyService(c.Retention)
|
|
||||||
for _, g := range c.Graphites {
|
|
||||||
if err := s.appendGraphiteService(g); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) appendClusterService(c cluster.Config) {
|
|
||||||
srv := cluster.NewService(c)
|
|
||||||
srv.TSDBStore = s.TSDBStore
|
|
||||||
srv.MetaStore = s.MetaStore
|
|
||||||
s.Services = append(s.Services, srv)
|
|
||||||
s.ClusterService = srv
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) appendSnapshotterService() {
|
|
||||||
srv := snapshotter.NewService()
|
|
||||||
srv.TSDBStore = s.TSDBStore
|
|
||||||
srv.MetaStore = s.MetaStore
|
|
||||||
s.Services = append(s.Services, srv)
|
|
||||||
s.SnapshotterService = srv
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) appendRetentionPolicyService(c retention.Config) {
|
|
||||||
if !c.Enabled {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
srv := retention.NewService(c)
|
|
||||||
srv.MetaStore = s.MetaStore
|
|
||||||
srv.TSDBStore = s.TSDBStore
|
|
||||||
s.Services = append(s.Services, srv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) appendAdminService(c admin.Config) {
|
|
||||||
if !c.Enabled {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
srv := admin.NewService(c)
|
|
||||||
s.Services = append(s.Services, srv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) appendHTTPDService(c httpd.Config) {
|
|
||||||
if !c.Enabled {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
srv := httpd.NewService(c)
|
|
||||||
srv.Handler.MetaStore = s.MetaStore
|
|
||||||
srv.Handler.QueryExecutor = s.QueryExecutor
|
|
||||||
srv.Handler.PointsWriter = s.PointsWriter
|
|
||||||
srv.Handler.Version = s.version
|
|
||||||
|
|
||||||
// If a ContinuousQuerier service has been started, attach it.
|
|
||||||
for _, srvc := range s.Services {
|
|
||||||
if cqsrvc, ok := srvc.(continuous_querier.ContinuousQuerier); ok {
|
|
||||||
srv.Handler.ContinuousQuerier = cqsrvc
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
s.Services = append(s.Services, srv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) appendCollectdService(c collectd.Config) {
|
|
||||||
if !c.Enabled {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
srv := collectd.NewService(c)
|
|
||||||
srv.MetaStore = s.MetaStore
|
|
||||||
srv.PointsWriter = s.PointsWriter
|
|
||||||
s.Services = append(s.Services, srv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) appendOpenTSDBService(c opentsdb.Config) error {
|
|
||||||
if !c.Enabled {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
srv, err := opentsdb.NewService(c)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
srv.PointsWriter = s.PointsWriter
|
|
||||||
srv.MetaStore = s.MetaStore
|
|
||||||
s.Services = append(s.Services, srv)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) appendGraphiteService(c graphite.Config) error {
|
|
||||||
if !c.Enabled {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
srv, err := graphite.NewService(c)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
srv.PointsWriter = s.PointsWriter
|
|
||||||
srv.MetaStore = s.MetaStore
|
|
||||||
s.Services = append(s.Services, srv)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) appendPrecreatorService(c precreator.Config) error {
|
|
||||||
if !c.Enabled {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
srv, err := precreator.NewService(c)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
srv.MetaStore = s.MetaStore
|
|
||||||
s.Services = append(s.Services, srv)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) appendUDPService(c udp.Config) {
|
|
||||||
if !c.Enabled {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
srv := udp.NewService(c)
|
|
||||||
srv.PointsWriter = s.PointsWriter
|
|
||||||
s.Services = append(s.Services, srv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Server) appendContinuousQueryService(c continuous_querier.Config) {
|
|
||||||
if !c.Enabled {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
srv := continuous_querier.NewService(c)
|
|
||||||
srv.MetaStore = s.MetaStore
|
|
||||||
srv.QueryExecutor = s.QueryExecutor
|
|
||||||
srv.PointsWriter = s.PointsWriter
|
|
||||||
s.Services = append(s.Services, srv)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Err returns an error channel that multiplexes all out of band errors received from all services.
|
|
||||||
func (s *Server) Err() <-chan error { return s.err }
|
|
||||||
|
|
||||||
// Open opens the meta and data store and all services.
|
|
||||||
func (s *Server) Open() error {
|
|
||||||
if err := func() error {
|
|
||||||
// Start profiling, if set.
|
|
||||||
startProfile(s.CPUProfile, s.MemProfile)
|
|
||||||
|
|
||||||
host, port, err := s.hostAddr()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
hostport := net.JoinHostPort(host, port)
|
|
||||||
addr, err := net.ResolveTCPAddr("tcp", hostport)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("resolve tcp: addr=%s, err=%s", hostport, err)
|
|
||||||
}
|
|
||||||
s.MetaStore.Addr = addr
|
|
||||||
s.MetaStore.RemoteAddr = &tcpaddr{hostport}
|
|
||||||
|
|
||||||
// Open shared TCP connection.
|
|
||||||
ln, err := net.Listen("tcp", s.BindAddress)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("listen: %s", err)
|
|
||||||
}
|
|
||||||
s.Listener = ln
|
|
||||||
|
|
||||||
// The port 0 is used, we need to retrieve the port assigned by the kernel
|
|
||||||
if strings.HasSuffix(s.BindAddress, ":0") {
|
|
||||||
s.MetaStore.Addr = ln.Addr()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Multiplex listener.
|
|
||||||
mux := tcp.NewMux()
|
|
||||||
s.MetaStore.RaftListener = mux.Listen(meta.MuxRaftHeader)
|
|
||||||
s.MetaStore.ExecListener = mux.Listen(meta.MuxExecHeader)
|
|
||||||
s.MetaStore.RPCListener = mux.Listen(meta.MuxRPCHeader)
|
|
||||||
|
|
||||||
s.ClusterService.Listener = mux.Listen(cluster.MuxHeader)
|
|
||||||
s.SnapshotterService.Listener = mux.Listen(snapshotter.MuxHeader)
|
|
||||||
go mux.Serve(ln)
|
|
||||||
|
|
||||||
// Open meta store.
|
|
||||||
if err := s.MetaStore.Open(); err != nil {
|
|
||||||
return fmt.Errorf("open meta store: %s", err)
|
|
||||||
}
|
|
||||||
go s.monitorErrorChan(s.MetaStore.Err())
|
|
||||||
|
|
||||||
// Wait for the store to initialize.
|
|
||||||
<-s.MetaStore.Ready()
|
|
||||||
|
|
||||||
// Open TSDB store.
|
|
||||||
if err := s.TSDBStore.Open(); err != nil {
|
|
||||||
return fmt.Errorf("open tsdb store: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open the hinted handoff service
|
|
||||||
if err := s.HintedHandoff.Open(); err != nil {
|
|
||||||
return fmt.Errorf("open hinted handoff: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, service := range s.Services {
|
|
||||||
if err := service.Open(); err != nil {
|
|
||||||
return fmt.Errorf("open service: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start the reporting service, if not disabled.
|
|
||||||
if !s.reportingDisabled {
|
|
||||||
go s.startServerReporting()
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
|
|
||||||
}(); err != nil {
|
|
||||||
s.Close()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close shuts down the meta and data stores and all services.
|
|
||||||
func (s *Server) Close() error {
|
|
||||||
stopProfile()
|
|
||||||
|
|
||||||
if s.Listener != nil {
|
|
||||||
s.Listener.Close()
|
|
||||||
}
|
|
||||||
if s.MetaStore != nil {
|
|
||||||
s.MetaStore.Close()
|
|
||||||
}
|
|
||||||
if s.TSDBStore != nil {
|
|
||||||
s.TSDBStore.Close()
|
|
||||||
}
|
|
||||||
if s.HintedHandoff != nil {
|
|
||||||
s.HintedHandoff.Close()
|
|
||||||
}
|
|
||||||
for _, service := range s.Services {
|
|
||||||
service.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
close(s.closing)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// startServerReporting starts periodic server reporting.
|
|
||||||
func (s *Server) startServerReporting() {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-s.closing:
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
if err := s.MetaStore.WaitForLeader(30 * time.Second); err != nil {
|
|
||||||
log.Printf("no leader available for reporting: %s", err.Error())
|
|
||||||
time.Sleep(time.Second)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
s.reportServer()
|
|
||||||
<-time.After(24 * time.Hour)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// reportServer reports anonymous statistics about the system.
|
|
||||||
func (s *Server) reportServer() {
|
|
||||||
dis, err := s.MetaStore.Databases()
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("failed to retrieve databases for reporting: %s", err.Error())
|
|
||||||
return
|
|
||||||
}
|
|
||||||
numDatabases := len(dis)
|
|
||||||
|
|
||||||
numMeasurements := 0
|
|
||||||
numSeries := 0
|
|
||||||
for _, di := range dis {
|
|
||||||
d := s.TSDBStore.DatabaseIndex(di.Name)
|
|
||||||
if d == nil {
|
|
||||||
// No data in this store for this database.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
m, s := d.MeasurementSeriesCounts()
|
|
||||||
numMeasurements += m
|
|
||||||
numSeries += s
|
|
||||||
}
|
|
||||||
|
|
||||||
clusterID, err := s.MetaStore.ClusterID()
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("failed to retrieve cluster ID for reporting: %s", err.Error())
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
json := fmt.Sprintf(`[{
|
|
||||||
"name":"reports",
|
|
||||||
"columns":["os", "arch", "version", "server_id", "cluster_id", "num_series", "num_measurements", "num_databases"],
|
|
||||||
"points":[["%s", "%s", "%s", "%x", "%x", "%d", "%d", "%d"]]
|
|
||||||
}]`, runtime.GOOS, runtime.GOARCH, s.version, s.MetaStore.NodeID(), clusterID, numSeries, numMeasurements, numDatabases)
|
|
||||||
|
|
||||||
data := bytes.NewBufferString(json)
|
|
||||||
|
|
||||||
log.Printf("Sending anonymous usage statistics to m.influxdb.com")
|
|
||||||
|
|
||||||
client := http.Client{Timeout: time.Duration(5 * time.Second)}
|
|
||||||
go client.Post("http://m.influxdb.com:8086/db/reporting/series?u=reporter&p=influxdb", "application/json", data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// monitorErrorChan reads an error channel and resends it through the server.
|
|
||||||
func (s *Server) monitorErrorChan(ch <-chan error) {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case err, ok := <-ch:
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
s.err <- err
|
|
||||||
case <-s.closing:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// hostAddr returns the host and port that remote nodes will use to reach this
|
|
||||||
// node.
|
|
||||||
func (s *Server) hostAddr() (string, string, error) {
|
|
||||||
// Resolve host to address.
|
|
||||||
_, port, err := net.SplitHostPort(s.BindAddress)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", fmt.Errorf("split bind address: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
host := s.Hostname
|
|
||||||
|
|
||||||
// See if we might have a port that will override the BindAddress port
|
|
||||||
if host != "" && host[len(host)-1] >= '0' && host[len(host)-1] <= '9' && strings.Contains(host, ":") {
|
|
||||||
hostArg, portArg, err := net.SplitHostPort(s.Hostname)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
if hostArg != "" {
|
|
||||||
host = hostArg
|
|
||||||
}
|
|
||||||
|
|
||||||
if portArg != "" {
|
|
||||||
port = portArg
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return host, port, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Service represents a service attached to the server.
|
|
||||||
type Service interface {
|
|
||||||
Open() error
|
|
||||||
Close() error
|
|
||||||
}
|
|
||||||
|
|
||||||
// prof stores the file locations of active profiles.
|
|
||||||
var prof struct {
|
|
||||||
cpu *os.File
|
|
||||||
mem *os.File
|
|
||||||
}
|
|
||||||
|
|
||||||
// StartProfile initializes the cpu and memory profile, if specified.
|
|
||||||
func startProfile(cpuprofile, memprofile string) {
|
|
||||||
if cpuprofile != "" {
|
|
||||||
f, err := os.Create(cpuprofile)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("cpuprofile: %v", err)
|
|
||||||
}
|
|
||||||
log.Printf("writing CPU profile to: %s\n", cpuprofile)
|
|
||||||
prof.cpu = f
|
|
||||||
pprof.StartCPUProfile(prof.cpu)
|
|
||||||
}
|
|
||||||
|
|
||||||
if memprofile != "" {
|
|
||||||
f, err := os.Create(memprofile)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("memprofile: %v", err)
|
|
||||||
}
|
|
||||||
log.Printf("writing mem profile to: %s\n", memprofile)
|
|
||||||
prof.mem = f
|
|
||||||
runtime.MemProfileRate = 4096
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// StopProfile closes the cpu and memory profiles if they are running.
|
|
||||||
func stopProfile() {
|
|
||||||
if prof.cpu != nil {
|
|
||||||
pprof.StopCPUProfile()
|
|
||||||
prof.cpu.Close()
|
|
||||||
log.Println("CPU profile stopped")
|
|
||||||
}
|
|
||||||
if prof.mem != nil {
|
|
||||||
pprof.Lookup("heap").WriteTo(prof.mem, 0)
|
|
||||||
prof.mem.Close()
|
|
||||||
log.Println("mem profile stopped")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type tcpaddr struct{ host string }
|
|
||||||
|
|
||||||
func (a *tcpaddr) Network() string { return "tcp" }
|
|
||||||
func (a *tcpaddr) String() string { return a.host }
|
|
312
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_helpers_test.go
generated
vendored
312
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_helpers_test.go
generated
vendored
|
@ -1,312 +0,0 @@
|
||||||
// This package is a set of convenience helpers and structs to make integration testing easier
|
|
||||||
package run_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"math"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/cmd/influxd/run"
|
|
||||||
"github.com/influxdb/influxdb/meta"
|
|
||||||
"github.com/influxdb/influxdb/services/httpd"
|
|
||||||
"github.com/influxdb/influxdb/toml"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Server represents a test wrapper for run.Server.
|
|
||||||
type Server struct {
|
|
||||||
*run.Server
|
|
||||||
Config *run.Config
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewServer returns a new instance of Server.
|
|
||||||
func NewServer(c *run.Config) *Server {
|
|
||||||
srv, _ := run.NewServer(c, "testServer")
|
|
||||||
s := Server{
|
|
||||||
Server: srv,
|
|
||||||
Config: c,
|
|
||||||
}
|
|
||||||
s.TSDBStore.EngineOptions.Config = c.Data
|
|
||||||
configureLogging(&s)
|
|
||||||
return &s
|
|
||||||
}
|
|
||||||
|
|
||||||
// OpenServer opens a test server.
|
|
||||||
func OpenServer(c *run.Config, joinURLs string) *Server {
|
|
||||||
s := NewServer(c)
|
|
||||||
configureLogging(s)
|
|
||||||
if err := s.Open(); err != nil {
|
|
||||||
panic(err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// OpenServerWithVersion opens a test server with a specific version.
|
|
||||||
func OpenServerWithVersion(c *run.Config, version string) *Server {
|
|
||||||
srv, _ := run.NewServer(c, version)
|
|
||||||
s := Server{
|
|
||||||
Server: srv,
|
|
||||||
Config: c,
|
|
||||||
}
|
|
||||||
configureLogging(&s)
|
|
||||||
if err := s.Open(); err != nil {
|
|
||||||
panic(err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
return &s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close shuts down the server and removes all temporary paths.
|
|
||||||
func (s *Server) Close() {
|
|
||||||
os.RemoveAll(s.Config.Meta.Dir)
|
|
||||||
os.RemoveAll(s.Config.Data.Dir)
|
|
||||||
os.RemoveAll(s.Config.HintedHandoff.Dir)
|
|
||||||
s.Server.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// URL returns the base URL for the httpd endpoint.
|
|
||||||
func (s *Server) URL() string {
|
|
||||||
for _, service := range s.Services {
|
|
||||||
if service, ok := service.(*httpd.Service); ok {
|
|
||||||
return "http://" + service.Addr().String()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
panic("httpd server not found in services")
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateDatabaseAndRetentionPolicy will create the database and retention policy.
|
|
||||||
func (s *Server) CreateDatabaseAndRetentionPolicy(db string, rp *meta.RetentionPolicyInfo) error {
|
|
||||||
if _, err := s.MetaStore.CreateDatabase(db); err != nil {
|
|
||||||
return err
|
|
||||||
} else if _, err := s.MetaStore.CreateRetentionPolicy(db, rp); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Query executes a query against the server and returns the results.
|
|
||||||
func (s *Server) Query(query string) (results string, err error) {
|
|
||||||
return s.QueryWithParams(query, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Query executes a query against the server and returns the results.
|
|
||||||
func (s *Server) QueryWithParams(query string, values url.Values) (results string, err error) {
|
|
||||||
if values == nil {
|
|
||||||
values = url.Values{}
|
|
||||||
}
|
|
||||||
values.Set("q", query)
|
|
||||||
resp, err := http.Get(s.URL() + "/query?" + values.Encode())
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
//} else if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusBadRequest {
|
|
||||||
}
|
|
||||||
body := string(MustReadAll(resp.Body))
|
|
||||||
switch resp.StatusCode {
|
|
||||||
case http.StatusBadRequest:
|
|
||||||
if !expectPattern(".*error parsing query*.", body) {
|
|
||||||
return "", fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body)
|
|
||||||
}
|
|
||||||
return body, nil
|
|
||||||
case http.StatusOK:
|
|
||||||
return body, nil
|
|
||||||
default:
|
|
||||||
return "", fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write executes a write against the server and returns the results.
|
|
||||||
func (s *Server) Write(db, rp, body string, params url.Values) (results string, err error) {
|
|
||||||
if params == nil {
|
|
||||||
params = url.Values{}
|
|
||||||
}
|
|
||||||
if params.Get("db") == "" {
|
|
||||||
params.Set("db", db)
|
|
||||||
}
|
|
||||||
if params.Get("rp") == "" {
|
|
||||||
params.Set("rp", rp)
|
|
||||||
}
|
|
||||||
resp, err := http.Post(s.URL()+"/write?"+params.Encode(), "", strings.NewReader(body))
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
} else if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
|
|
||||||
return "", fmt.Errorf("invalid status code: code=%d, body=%s", resp.StatusCode, MustReadAll(resp.Body))
|
|
||||||
}
|
|
||||||
return string(MustReadAll(resp.Body)), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewConfig returns the default config with temporary paths.
|
|
||||||
func NewConfig() *run.Config {
|
|
||||||
c := run.NewConfig()
|
|
||||||
c.ReportingDisabled = true
|
|
||||||
c.Meta.Dir = MustTempFile()
|
|
||||||
c.Meta.BindAddress = "127.0.0.1:0"
|
|
||||||
c.Meta.HeartbeatTimeout = toml.Duration(50 * time.Millisecond)
|
|
||||||
c.Meta.ElectionTimeout = toml.Duration(50 * time.Millisecond)
|
|
||||||
c.Meta.LeaderLeaseTimeout = toml.Duration(50 * time.Millisecond)
|
|
||||||
c.Meta.CommitTimeout = toml.Duration(5 * time.Millisecond)
|
|
||||||
|
|
||||||
c.Data.Dir = MustTempFile()
|
|
||||||
c.Data.WALDir = MustTempFile()
|
|
||||||
|
|
||||||
c.HintedHandoff.Dir = MustTempFile()
|
|
||||||
|
|
||||||
c.HTTPD.Enabled = true
|
|
||||||
c.HTTPD.BindAddress = "127.0.0.1:0"
|
|
||||||
c.HTTPD.LogEnabled = testing.Verbose()
|
|
||||||
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
func newRetentionPolicyInfo(name string, rf int, duration time.Duration) *meta.RetentionPolicyInfo {
|
|
||||||
return &meta.RetentionPolicyInfo{Name: name, ReplicaN: rf, Duration: duration}
|
|
||||||
}
|
|
||||||
|
|
||||||
func maxFloat64() string {
|
|
||||||
maxFloat64, _ := json.Marshal(math.MaxFloat64)
|
|
||||||
return string(maxFloat64)
|
|
||||||
}
|
|
||||||
|
|
||||||
func maxInt64() string {
|
|
||||||
maxInt64, _ := json.Marshal(^int64(0))
|
|
||||||
return string(maxInt64)
|
|
||||||
}
|
|
||||||
|
|
||||||
func now() time.Time {
|
|
||||||
return time.Now().UTC()
|
|
||||||
}
|
|
||||||
|
|
||||||
func yesterday() time.Time {
|
|
||||||
return now().Add(-1 * time.Hour * 24)
|
|
||||||
}
|
|
||||||
|
|
||||||
func mustParseTime(layout, value string) time.Time {
|
|
||||||
tm, err := time.Parse(layout, value)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return tm
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustReadAll reads r. Panic on error.
|
|
||||||
func MustReadAll(r io.Reader) []byte {
|
|
||||||
b, err := ioutil.ReadAll(r)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustTempFile returns a path to a temporary file.
|
|
||||||
func MustTempFile() string {
|
|
||||||
f, err := ioutil.TempFile("", "influxd-")
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
f.Close()
|
|
||||||
os.Remove(f.Name())
|
|
||||||
return f.Name()
|
|
||||||
}
|
|
||||||
|
|
||||||
func expectPattern(exp, act string) bool {
|
|
||||||
re := regexp.MustCompile(exp)
|
|
||||||
if !re.MatchString(act) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
type Query struct {
|
|
||||||
name string
|
|
||||||
command string
|
|
||||||
params url.Values
|
|
||||||
exp, act string
|
|
||||||
pattern bool
|
|
||||||
skip bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Execute runs the command and returns an err if it fails
|
|
||||||
func (q *Query) Execute(s *Server) (err error) {
|
|
||||||
if q.params == nil {
|
|
||||||
q.act, err = s.Query(q.command)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
q.act, err = s.QueryWithParams(q.command, q.params)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *Query) success() bool {
|
|
||||||
if q.pattern {
|
|
||||||
return expectPattern(q.exp, q.act)
|
|
||||||
}
|
|
||||||
return q.exp == q.act
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *Query) Error(err error) string {
|
|
||||||
return fmt.Sprintf("%s: %v", q.name, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *Query) failureMessage() string {
|
|
||||||
return fmt.Sprintf("%s: unexpected results\nquery: %s\nexp: %s\nactual: %s\n", q.name, q.command, q.exp, q.act)
|
|
||||||
}
|
|
||||||
|
|
||||||
type Test struct {
|
|
||||||
initialized bool
|
|
||||||
write string
|
|
||||||
params url.Values
|
|
||||||
db string
|
|
||||||
rp string
|
|
||||||
exp string
|
|
||||||
queries []*Query
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewTest(db, rp string) Test {
|
|
||||||
return Test{
|
|
||||||
db: db,
|
|
||||||
rp: rp,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *Test) addQueries(q ...*Query) {
|
|
||||||
t.queries = append(t.queries, q...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *Test) init(s *Server) error {
|
|
||||||
if t.write == "" || t.initialized {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
t.initialized = true
|
|
||||||
if res, err := s.Write(t.db, t.rp, t.write, t.params); err != nil {
|
|
||||||
return err
|
|
||||||
} else if t.exp != res {
|
|
||||||
return fmt.Errorf("unexpected results\nexp: %s\ngot: %s\n", t.exp, res)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func configureLogging(s *Server) {
|
|
||||||
// Set the logger to discard unless verbose is on
|
|
||||||
if !testing.Verbose() {
|
|
||||||
type logSetter interface {
|
|
||||||
SetLogger(*log.Logger)
|
|
||||||
}
|
|
||||||
nullLogger := log.New(ioutil.Discard, "", 0)
|
|
||||||
s.MetaStore.Logger = nullLogger
|
|
||||||
s.TSDBStore.Logger = nullLogger
|
|
||||||
s.HintedHandoff.SetLogger(nullLogger)
|
|
||||||
for _, service := range s.Services {
|
|
||||||
if service, ok := service.(logSetter); ok {
|
|
||||||
service.SetLogger(nullLogger)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
3719
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.go
generated
vendored
3719
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.go
generated
vendored
File diff suppressed because it is too large
Load Diff
150
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.md
generated
vendored
150
Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.md
generated
vendored
|
@ -1,150 +0,0 @@
|
||||||
# Server Integration Tests
|
|
||||||
|
|
||||||
Currently, the file `server_test.go` has integration tests for single node scenarios.
|
|
||||||
At some point we'll need to add cluster tests, and may add them in a different file, or
|
|
||||||
rename `server_test.go` to `server_single_node_test.go` or something like that.
|
|
||||||
|
|
||||||
## What is in a test?
|
|
||||||
|
|
||||||
Each test is broken apart effectively into the following areas:
|
|
||||||
|
|
||||||
- Write sample data
|
|
||||||
- Use cases for table driven test, that include a command (typically a query) and an expected result.
|
|
||||||
|
|
||||||
When each test runs it does the following:
|
|
||||||
|
|
||||||
- init: determines if there are any writes and if so, writes them to the in-memory database
|
|
||||||
- queries: iterate through each query, executing the command, and comparing the results to the expected result.
|
|
||||||
|
|
||||||
## Idempotent - Allows for parallel tests
|
|
||||||
|
|
||||||
Each test should be `idempotent`, meaining that its data will not be affected by other tests, or use cases within the table tests themselves.
|
|
||||||
This allows for parallel testing, keeping the test suite total execution time very low.
|
|
||||||
|
|
||||||
### Basic sample test
|
|
||||||
|
|
||||||
```go
|
|
||||||
// Ensure the server can have a database with multiple measurements.
|
|
||||||
func TestServer_Query_Multiple_Measurements(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
s := OpenServer(NewConfig(), "")
|
|
||||||
defer s.Close()
|
|
||||||
|
|
||||||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make sure we do writes for measurements that will span across shards
|
|
||||||
writes := []string{
|
|
||||||
fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
|
|
||||||
fmt.Sprintf("cpu1,host=server02 value=50,core=2 %d", mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()),
|
|
||||||
}
|
|
||||||
test := NewTest("db0", "rp0")
|
|
||||||
test.write = strings.Join(writes, "\n")
|
|
||||||
|
|
||||||
test.addQueries([]*Query{
|
|
||||||
&Query{
|
|
||||||
name: "measurement in one shard but not another shouldn't panic server",
|
|
||||||
command: `SELECT host,value FROM db0.rp0.cpu`,
|
|
||||||
exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",100]]}]}]}`,
|
|
||||||
},
|
|
||||||
}...)
|
|
||||||
|
|
||||||
if err := test.init(s); err != nil {
|
|
||||||
t.Fatalf("test init failed: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, query := range test.queries {
|
|
||||||
if query.skip {
|
|
||||||
t.Logf("SKIP:: %s", query.name)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err := query.Execute(s); err != nil {
|
|
||||||
t.Error(query.Error(err))
|
|
||||||
} else if !query.success() {
|
|
||||||
t.Error(query.failureMessage())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Let's break this down:
|
|
||||||
|
|
||||||
In this test, we first tell it to run in parallel with the `t.Parallel()` call.
|
|
||||||
|
|
||||||
We then open a new server with:
|
|
||||||
|
|
||||||
```go
|
|
||||||
s := OpenServer(NewConfig(), "")
|
|
||||||
defer s.Close()
|
|
||||||
```
|
|
||||||
|
|
||||||
If needed, we create a database and default retention policy. This is usually needed
|
|
||||||
when inserting and querying data. This is not needed if you are testing commands like `CREATE DATABASE`, `SHOW DIAGNOSTICS`, etc.
|
|
||||||
|
|
||||||
```go
|
|
||||||
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Next, set up the write data you need:
|
|
||||||
|
|
||||||
```go
|
|
||||||
writes := []string{
|
|
||||||
fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
|
|
||||||
fmt.Sprintf("cpu1,host=server02 value=50,core=2 %d", mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()),
|
|
||||||
}
|
|
||||||
```
|
|
||||||
Create a new test with the database and retention policy:
|
|
||||||
|
|
||||||
```go
|
|
||||||
test := NewTest("db0", "rp0")
|
|
||||||
```
|
|
||||||
|
|
||||||
Send in the writes:
|
|
||||||
```go
|
|
||||||
test.write = strings.Join(writes, "\n")
|
|
||||||
```
|
|
||||||
|
|
||||||
Add some queries (the second one is mocked out to show how to add more than one):
|
|
||||||
|
|
||||||
```go
|
|
||||||
test.addQueries([]*Query{
|
|
||||||
&Query{
|
|
||||||
name: "measurement in one shard but not another shouldn't panic server",
|
|
||||||
command: `SELECT host,value FROM db0.rp0.cpu`,
|
|
||||||
exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",100]]}]}]}`,
|
|
||||||
},
|
|
||||||
&Query{
|
|
||||||
name: "another test here...",
|
|
||||||
command: `Some query command`,
|
|
||||||
exp: `the expected results`,
|
|
||||||
},
|
|
||||||
}...)
|
|
||||||
```
|
|
||||||
|
|
||||||
The rest of the code is boilerplate execution code. It is purposefully not refactored out to a helper
|
|
||||||
to make sure the test failure reports the proper lines for debugging purposes.
|
|
||||||
|
|
||||||
#### Running the tests
|
|
||||||
|
|
||||||
To run the tests:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
go test ./cmd/influxd/run -parallel 500 -timeout 10s
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Running a specific test
|
|
||||||
|
|
||||||
```sh
|
|
||||||
go test ./cmd/influxd/run -parallel 500 -timeout 10s -run TestServer_Query_Fill
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Verbose feedback
|
|
||||||
|
|
||||||
By default, all logs are silenced when testing. If you pass in the `-v` flag, the test suite becomes verbose, and enables all logging in the system
|
|
||||||
|
|
||||||
```sh
|
|
||||||
go test ./cmd/influxd/run -parallel 500 -timeout 10s -run TestServer_Query_Fill -v
|
|
||||||
```
|
|
|
@ -1,143 +0,0 @@
|
||||||
package influxdb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/influxql"
|
|
||||||
)
|
|
||||||
|
|
||||||
// GoDiagnostics captures basic information about the runtime.
|
|
||||||
type GoDiagnostics struct {
|
|
||||||
GoMaxProcs int
|
|
||||||
NumGoroutine int
|
|
||||||
Version string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewGoDiagnostics returns a GoDiagnostics object.
|
|
||||||
func NewGoDiagnostics() *GoDiagnostics {
|
|
||||||
return &GoDiagnostics{
|
|
||||||
GoMaxProcs: runtime.GOMAXPROCS(0),
|
|
||||||
NumGoroutine: runtime.NumGoroutine(),
|
|
||||||
Version: runtime.Version(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AsRow returns the GoDiagnostic object as an InfluxQL row.
|
|
||||||
func (g *GoDiagnostics) AsRow(measurement string, tags map[string]string) *influxql.Row {
|
|
||||||
return &influxql.Row{
|
|
||||||
Name: measurement,
|
|
||||||
Columns: []string{"time", "goMaxProcs", "numGoRoutine", "version"},
|
|
||||||
Tags: tags,
|
|
||||||
Values: [][]interface{}{[]interface{}{time.Now().UTC(),
|
|
||||||
g.GoMaxProcs, g.NumGoroutine, g.Version}},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// SystemDiagnostics captures basic machine data.
|
|
||||||
type SystemDiagnostics struct {
|
|
||||||
Hostname string
|
|
||||||
PID int
|
|
||||||
OS string
|
|
||||||
Arch string
|
|
||||||
NumCPU int
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSystemDiagnostics returns a SystemDiagnostics object.
|
|
||||||
func NewSystemDiagnostics() *SystemDiagnostics {
|
|
||||||
hostname, err := os.Hostname()
|
|
||||||
if err != nil {
|
|
||||||
hostname = "unknown"
|
|
||||||
}
|
|
||||||
|
|
||||||
return &SystemDiagnostics{
|
|
||||||
Hostname: hostname,
|
|
||||||
PID: os.Getpid(),
|
|
||||||
OS: runtime.GOOS,
|
|
||||||
Arch: runtime.GOARCH,
|
|
||||||
NumCPU: runtime.NumCPU(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AsRow returns the GoDiagnostic object as an InfluxQL row.
|
|
||||||
func (s *SystemDiagnostics) AsRow(measurement string, tags map[string]string) *influxql.Row {
|
|
||||||
return &influxql.Row{
|
|
||||||
Name: measurement,
|
|
||||||
Columns: []string{"time", "hostname", "pid", "os", "arch", "numCPU"},
|
|
||||||
Tags: tags,
|
|
||||||
Values: [][]interface{}{[]interface{}{time.Now().UTC(),
|
|
||||||
s.Hostname, s.PID, s.OS, s.Arch, s.NumCPU}},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// MemoryDiagnostics captures Go memory stats.
|
|
||||||
type MemoryDiagnostics struct {
|
|
||||||
Alloc int64
|
|
||||||
TotalAlloc int64
|
|
||||||
Sys int64
|
|
||||||
Lookups int64
|
|
||||||
Mallocs int64
|
|
||||||
Frees int64
|
|
||||||
HeapAlloc int64
|
|
||||||
HeapSys int64
|
|
||||||
HeapIdle int64
|
|
||||||
HeapInUse int64
|
|
||||||
HeapReleased int64
|
|
||||||
HeapObjects int64
|
|
||||||
PauseTotalNs int64
|
|
||||||
NumGC int64
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMemoryDiagnostics returns a MemoryDiagnostics object.
|
|
||||||
func NewMemoryDiagnostics() *MemoryDiagnostics {
|
|
||||||
var m runtime.MemStats
|
|
||||||
runtime.ReadMemStats(&m)
|
|
||||||
|
|
||||||
return &MemoryDiagnostics{
|
|
||||||
Alloc: int64(m.Alloc),
|
|
||||||
TotalAlloc: int64(m.TotalAlloc),
|
|
||||||
Sys: int64(m.Sys),
|
|
||||||
Lookups: int64(m.Lookups),
|
|
||||||
Mallocs: int64(m.Mallocs),
|
|
||||||
Frees: int64(m.Frees),
|
|
||||||
HeapAlloc: int64(m.HeapAlloc),
|
|
||||||
HeapSys: int64(m.HeapSys),
|
|
||||||
HeapIdle: int64(m.HeapIdle),
|
|
||||||
HeapInUse: int64(m.HeapInuse),
|
|
||||||
HeapReleased: int64(m.HeapReleased),
|
|
||||||
HeapObjects: int64(m.HeapObjects),
|
|
||||||
PauseTotalNs: int64(m.PauseTotalNs),
|
|
||||||
NumGC: int64(m.NumGC),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AsRow returns the MemoryDiagnostics object as an InfluxQL row.
|
|
||||||
func (m *MemoryDiagnostics) AsRow(measurement string, tags map[string]string) *influxql.Row {
|
|
||||||
return &influxql.Row{
|
|
||||||
Name: measurement,
|
|
||||||
Columns: []string{"time", "alloc", "totalAlloc", "sys", "lookups", "mallocs", "frees", "heapAlloc",
|
|
||||||
"heapSys", "heapIdle", "heapInUse", "heapReleased", "heapObjects", "pauseTotalNs", "numGG"},
|
|
||||||
Tags: tags,
|
|
||||||
Values: [][]interface{}{[]interface{}{time.Now().UTC(),
|
|
||||||
m.Alloc, m.TotalAlloc, m.Sys, m.Lookups, m.Mallocs, m.Frees, m.HeapAlloc,
|
|
||||||
m.HeapSys, m.HeapIdle, m.HeapInUse, m.HeapReleased, m.HeapObjects, m.PauseTotalNs, m.NumGC}},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// BuildDiagnostics capture basic build version information.
|
|
||||||
type BuildDiagnostics struct {
|
|
||||||
Version string
|
|
||||||
CommitHash string
|
|
||||||
}
|
|
||||||
|
|
||||||
// AsRow returns the BuildDiagnostics object as an InfluxQL row.
|
|
||||||
func (b *BuildDiagnostics) AsRow(measurement string, tags map[string]string) *influxql.Row {
|
|
||||||
return &influxql.Row{
|
|
||||||
Name: measurement,
|
|
||||||
Columns: []string{"time", "version", "commitHash"},
|
|
||||||
Tags: tags,
|
|
||||||
Values: [][]interface{}{[]interface{}{time.Now().UTC(),
|
|
||||||
b.Version, b.CommitHash}},
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,78 +0,0 @@
|
||||||
package influxdb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrFieldsRequired is returned when a point does not any fields.
|
|
||||||
ErrFieldsRequired = errors.New("fields required")
|
|
||||||
|
|
||||||
// ErrFieldTypeConflict is returned when a new field already exists with a different type.
|
|
||||||
ErrFieldTypeConflict = errors.New("field type conflict")
|
|
||||||
)
|
|
||||||
|
|
||||||
func ErrDatabaseNotFound(name string) error { return fmt.Errorf("database not found: %s", name) }
|
|
||||||
|
|
||||||
func ErrMeasurementNotFound(name string) error { return fmt.Errorf("measurement not found: %s", name) }
|
|
||||||
|
|
||||||
func Errorf(format string, a ...interface{}) (err error) {
|
|
||||||
if _, file, line, ok := runtime.Caller(2); ok {
|
|
||||||
a = append(a, file, line)
|
|
||||||
err = fmt.Errorf(format+" (%s:%d)", a...)
|
|
||||||
} else {
|
|
||||||
err = fmt.Errorf(format, a...)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsClientError indicates whether an error is a known client error.
|
|
||||||
func IsClientError(err error) bool {
|
|
||||||
if err == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if err == ErrFieldsRequired {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if err == ErrFieldTypeConflict {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.Contains(err.Error(), ErrFieldTypeConflict.Error()) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// mustMarshal encodes a value to JSON.
|
|
||||||
// This will panic if an error occurs. This should only be used internally when
|
|
||||||
// an invalid marshal will cause corruption and a panic is appropriate.
|
|
||||||
func mustMarshalJSON(v interface{}) []byte {
|
|
||||||
b, err := json.Marshal(v)
|
|
||||||
if err != nil {
|
|
||||||
panic("marshal: " + err.Error())
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// mustUnmarshalJSON decodes a value from JSON.
|
|
||||||
// This will panic if an error occurs. This should only be used internally when
|
|
||||||
// an invalid unmarshal will cause corruption and a panic is appropriate.
|
|
||||||
func mustUnmarshalJSON(b []byte, v interface{}) {
|
|
||||||
if err := json.Unmarshal(b, v); err != nil {
|
|
||||||
panic("unmarshal: " + err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// assert will panic with a given formatted message if the given condition is false.
|
|
||||||
func assert(condition bool, msg string, v ...interface{}) {
|
|
||||||
if !condition {
|
|
||||||
panic(fmt.Sprintf("assert failed: "+msg, v...))
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1 +0,0 @@
|
||||||
rvm use ruby-2.1.0@burn-in --create
|
|
|
@ -1,4 +0,0 @@
|
||||||
source 'https://rubygems.org'
|
|
||||||
|
|
||||||
gem "colorize"
|
|
||||||
gem "influxdb"
|
|
|
@ -1,14 +0,0 @@
|
||||||
GEM
|
|
||||||
remote: https://rubygems.org/
|
|
||||||
specs:
|
|
||||||
colorize (0.6.0)
|
|
||||||
influxdb (0.0.16)
|
|
||||||
json
|
|
||||||
json (1.8.1)
|
|
||||||
|
|
||||||
PLATFORMS
|
|
||||||
ruby
|
|
||||||
|
|
||||||
DEPENDENCIES
|
|
||||||
colorize
|
|
||||||
influxdb
|
|
|
@ -1,79 +0,0 @@
|
||||||
require "influxdb"
|
|
||||||
require "colorize"
|
|
||||||
require "benchmark"
|
|
||||||
|
|
||||||
require_relative "log"
|
|
||||||
require_relative "random_gaussian"
|
|
||||||
|
|
||||||
BATCH_SIZE = 10_000
|
|
||||||
|
|
||||||
Log.info "Starting burn-in suite"
|
|
||||||
master = InfluxDB::Client.new
|
|
||||||
master.delete_database("burn-in") rescue nil
|
|
||||||
master.create_database("burn-in")
|
|
||||||
master.create_database_user("burn-in", "user", "pass")
|
|
||||||
|
|
||||||
master.database = "burn-in"
|
|
||||||
# master.query "select * from test1 into test2;"
|
|
||||||
# master.query "select count(value) from test1 group by time(1m) into test2;"
|
|
||||||
|
|
||||||
influxdb = InfluxDB::Client.new "burn-in", username: "user", password: "pass"
|
|
||||||
|
|
||||||
Log.success "Connected to server #{influxdb.host}:#{influxdb.port}"
|
|
||||||
|
|
||||||
Log.log "Creating RandomGaussian(500, 25)"
|
|
||||||
gaussian = RandomGaussian.new(500, 25)
|
|
||||||
point_count = 0
|
|
||||||
|
|
||||||
while true
|
|
||||||
Log.log "Generating 10,000 points.."
|
|
||||||
points = []
|
|
||||||
BATCH_SIZE.times do |n|
|
|
||||||
points << {value: gaussian.rand.to_i.abs}
|
|
||||||
end
|
|
||||||
point_count += points.length
|
|
||||||
|
|
||||||
Log.info "Sending points to server.."
|
|
||||||
begin
|
|
||||||
st = Time.now
|
|
||||||
foo = influxdb.write_point("test1", points)
|
|
||||||
et = Time.now
|
|
||||||
Log.log foo.inspect
|
|
||||||
Log.log "#{et-st} seconds elapsed"
|
|
||||||
Log.success "Write successful."
|
|
||||||
rescue => e
|
|
||||||
Log.failure "Write failed:"
|
|
||||||
Log.log e
|
|
||||||
end
|
|
||||||
sleep 0.5
|
|
||||||
|
|
||||||
Log.info "Checking regular points"
|
|
||||||
st = Time.now
|
|
||||||
response = influxdb.query("select count(value) from test1;")
|
|
||||||
et = Time.now
|
|
||||||
|
|
||||||
Log.log "#{et-st} seconds elapsed"
|
|
||||||
|
|
||||||
response_count = response["test1"].first["count"]
|
|
||||||
if point_count == response_count
|
|
||||||
Log.success "Point counts match: #{point_count} == #{response_count}"
|
|
||||||
else
|
|
||||||
Log.failure "Point counts don't match: #{point_count} != #{response_count}"
|
|
||||||
end
|
|
||||||
|
|
||||||
# Log.info "Checking continuous query points for test2"
|
|
||||||
# st = Time.now
|
|
||||||
# response = influxdb.query("select count(value) from test2;")
|
|
||||||
# et = Time.now
|
|
||||||
|
|
||||||
# Log.log "#{et-st} seconds elapsed"
|
|
||||||
|
|
||||||
# response_count = response["test2"].first["count"]
|
|
||||||
# if point_count == response_count
|
|
||||||
# Log.success "Point counts match: #{point_count} == #{response_count}"
|
|
||||||
# else
|
|
||||||
# Log.failure "Point counts don't match: #{point_count} != #{response_count}"
|
|
||||||
# end
|
|
||||||
end
|
|
||||||
|
|
||||||
|
|
|
@ -1,23 +0,0 @@
|
||||||
module Log
|
|
||||||
def self.info(msg)
|
|
||||||
print Time.now.strftime("%r") + " | "
|
|
||||||
puts msg.to_s.colorize(:yellow)
|
|
||||||
end
|
|
||||||
|
|
||||||
def self.success(msg)
|
|
||||||
print Time.now.strftime("%r") + " | "
|
|
||||||
puts msg.to_s.colorize(:green)
|
|
||||||
end
|
|
||||||
|
|
||||||
def self.failure(msg)
|
|
||||||
print Time.now.strftime("%r") + " | "
|
|
||||||
puts msg.to_s.colorize(:red)
|
|
||||||
end
|
|
||||||
|
|
||||||
def self.log(msg)
|
|
||||||
print Time.now.strftime("%r") + " | "
|
|
||||||
puts msg.to_s
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
|
|
31
Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_gaussian.rb
generated
vendored
31
Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_gaussian.rb
generated
vendored
|
@ -1,31 +0,0 @@
|
||||||
class RandomGaussian
|
|
||||||
def initialize(mean, stddev, rand_helper = lambda { Kernel.rand })
|
|
||||||
@rand_helper = rand_helper
|
|
||||||
@mean = mean
|
|
||||||
@stddev = stddev
|
|
||||||
@valid = false
|
|
||||||
@next = 0
|
|
||||||
end
|
|
||||||
|
|
||||||
def rand
|
|
||||||
if @valid then
|
|
||||||
@valid = false
|
|
||||||
return @next
|
|
||||||
else
|
|
||||||
@valid = true
|
|
||||||
x, y = self.class.gaussian(@mean, @stddev, @rand_helper)
|
|
||||||
@next = y
|
|
||||||
return x
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
private
|
|
||||||
def self.gaussian(mean, stddev, rand)
|
|
||||||
theta = 2 * Math::PI * rand.call
|
|
||||||
rho = Math.sqrt(-2 * Math.log(1 - rand.call))
|
|
||||||
scale = stddev * rho
|
|
||||||
x = mean + scale * Math.cos(theta)
|
|
||||||
y = mean + scale * Math.sin(theta)
|
|
||||||
return x, y
|
|
||||||
end
|
|
||||||
end
|
|
29
Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_points.rb
generated
vendored
29
Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_points.rb
generated
vendored
|
@ -1,29 +0,0 @@
|
||||||
require "influxdb"
|
|
||||||
|
|
||||||
ONE_WEEK_IN_SECONDS = 7*24*60*60
|
|
||||||
NUM_POINTS = 10_000
|
|
||||||
BATCHES = 100
|
|
||||||
|
|
||||||
master = InfluxDB::Client.new
|
|
||||||
master.delete_database("ctx") rescue nil
|
|
||||||
master.create_database("ctx")
|
|
||||||
|
|
||||||
influxdb = InfluxDB::Client.new "ctx"
|
|
||||||
influxdb.time_precision = "s"
|
|
||||||
|
|
||||||
names = ["foo", "bar", "baz", "quu", "qux"]
|
|
||||||
|
|
||||||
st = Time.now
|
|
||||||
BATCHES.times do |m|
|
|
||||||
points = []
|
|
||||||
|
|
||||||
puts "Writing #{NUM_POINTS} points, time ##{m}.."
|
|
||||||
NUM_POINTS.times do |n|
|
|
||||||
timestamp = Time.now.to_i - rand(ONE_WEEK_IN_SECONDS)
|
|
||||||
points << {value: names.sample, time: timestamp}
|
|
||||||
end
|
|
||||||
|
|
||||||
influxdb.write_point("ct1", points)
|
|
||||||
end
|
|
||||||
puts st
|
|
||||||
puts Time.now
|
|
|
@ -1,246 +0,0 @@
|
||||||
### Welcome to the InfluxDB configuration file.
|
|
||||||
|
|
||||||
# Once every 24 hours InfluxDB will report anonymous data to m.influxdb.com
|
|
||||||
# The data includes raft id (random 8 bytes), os, arch, version, and metadata.
|
|
||||||
# We don't track ip addresses of servers reporting. This is only used
|
|
||||||
# to track the number of instances running and the versions, which
|
|
||||||
# is very helpful for us.
|
|
||||||
# Change this option to true to disable reporting.
|
|
||||||
reporting-disabled = false
|
|
||||||
|
|
||||||
###
|
|
||||||
### [meta]
|
|
||||||
###
|
|
||||||
### Controls the parameters for the Raft consensus group that stores metadata
|
|
||||||
### about the InfluxDB cluster.
|
|
||||||
###
|
|
||||||
|
|
||||||
[meta]
|
|
||||||
dir = "/var/opt/influxdb/meta"
|
|
||||||
hostname = "localhost"
|
|
||||||
bind-address = ":8088"
|
|
||||||
retention-autocreate = true
|
|
||||||
election-timeout = "1s"
|
|
||||||
heartbeat-timeout = "1s"
|
|
||||||
leader-lease-timeout = "500ms"
|
|
||||||
commit-timeout = "50ms"
|
|
||||||
|
|
||||||
###
|
|
||||||
### [data]
|
|
||||||
###
|
|
||||||
### Controls where the actual shard data for InfluxDB lives and how it is
|
|
||||||
### flushed from the WAL. "dir" may need to be changed to a suitable place
|
|
||||||
### for your system, but the WAL settings are an advanced configuration. The
|
|
||||||
### defaults should work for most systems.
|
|
||||||
###
|
|
||||||
|
|
||||||
[data]
|
|
||||||
dir = "/var/opt/influxdb/data"
|
|
||||||
|
|
||||||
# The following WAL settings are for the b1 storage engine used in 0.9.2. They won't
|
|
||||||
# apply to any new shards created after upgrading to a version > 0.9.3.
|
|
||||||
max-wal-size = 104857600 # Maximum size the WAL can reach before a flush. Defaults to 100MB.
|
|
||||||
wal-flush-interval = "10m" # Maximum time data can sit in WAL before a flush.
|
|
||||||
wal-partition-flush-delay = "2s" # The delay time between each WAL partition being flushed.
|
|
||||||
|
|
||||||
# These are the WAL settings for the storage engine >= 0.9.3
|
|
||||||
wal-dir = "/var/opt/influxdb/wal"
|
|
||||||
wal-enable-logging = true
|
|
||||||
|
|
||||||
# When a series in the WAL in-memory cache reaches this size in bytes it is marked as ready to
|
|
||||||
# flush to the index
|
|
||||||
# wal-ready-series-size = 25600
|
|
||||||
|
|
||||||
# Flush and compact a partition once this ratio of series are over the ready size
|
|
||||||
# wal-compaction-threshold = 0.6
|
|
||||||
|
|
||||||
# Force a flush and compaction if any series in a partition gets above this size in bytes
|
|
||||||
# wal-max-series-size = 2097152
|
|
||||||
|
|
||||||
# Force a flush of all series and full compaction if there have been no writes in this
|
|
||||||
# amount of time. This is useful for ensuring that shards that are cold for writes don't
|
|
||||||
# keep a bunch of data cached in memory and in the WAL.
|
|
||||||
# wal-flush-cold-interval = "10m"
|
|
||||||
|
|
||||||
# Force a partition to flush its largest series if it reaches this approximate size in
|
|
||||||
# bytes. Remember there are 5 partitions so you'll need at least 5x this amount of memory.
|
|
||||||
# The more memory you have, the bigger this can be.
|
|
||||||
# wal-partition-size-threshold = 20971520
|
|
||||||
|
|
||||||
###
|
|
||||||
### [cluster]
|
|
||||||
###
|
|
||||||
### Controls non-Raft cluster behavior, which generally includes how data is
|
|
||||||
### shared across shards.
|
|
||||||
###
|
|
||||||
|
|
||||||
[cluster]
|
|
||||||
shard-writer-timeout = "5s" # The time within which a shard must respond to write.
|
|
||||||
write-timeout = "5s" # The time within which a write operation must complete on the cluster.
|
|
||||||
|
|
||||||
###
|
|
||||||
### [retention]
|
|
||||||
###
|
|
||||||
### Controls the enforcement of retention policies for evicting old data.
|
|
||||||
###
|
|
||||||
|
|
||||||
[retention]
|
|
||||||
enabled = true
|
|
||||||
check-interval = "10m"
|
|
||||||
|
|
||||||
###
|
|
||||||
### [admin]
|
|
||||||
###
|
|
||||||
### Controls the availability of the built-in, web-based admin interface. If HTTPS is
|
|
||||||
### enabled for the admin interface, HTTPS must also be enabled on the [http] service.
|
|
||||||
###
|
|
||||||
|
|
||||||
[admin]
|
|
||||||
enabled = true
|
|
||||||
bind-address = ":8083"
|
|
||||||
https-enabled = false
|
|
||||||
https-certificate = "/etc/ssl/influxdb.pem"
|
|
||||||
|
|
||||||
###
|
|
||||||
### [http]
|
|
||||||
###
|
|
||||||
### Controls how the HTTP endpoints are configured. These are the primary
|
|
||||||
### mechanism for getting data into and out of InfluxDB.
|
|
||||||
###
|
|
||||||
|
|
||||||
[http]
|
|
||||||
enabled = true
|
|
||||||
bind-address = ":8086"
|
|
||||||
auth-enabled = false
|
|
||||||
log-enabled = true
|
|
||||||
write-tracing = false
|
|
||||||
pprof-enabled = false
|
|
||||||
https-enabled = false
|
|
||||||
https-certificate = "/etc/ssl/influxdb.pem"
|
|
||||||
|
|
||||||
###
|
|
||||||
### [[graphite]]
|
|
||||||
###
|
|
||||||
### Controls one or many listeners for Graphite data.
|
|
||||||
###
|
|
||||||
|
|
||||||
[[graphite]]
|
|
||||||
enabled = false
|
|
||||||
# bind-address = ":2003"
|
|
||||||
# protocol = "tcp"
|
|
||||||
# consistency-level = "one"
|
|
||||||
# name-separator = "."
|
|
||||||
|
|
||||||
# These next lines control how batching works. You should have this enabled
|
|
||||||
# otherwise you could get dropped metrics or poor performance. Batching
|
|
||||||
# will buffer points in memory if you have many coming in.
|
|
||||||
|
|
||||||
# batch-size = 1000 # will flush if this many points get buffered
|
|
||||||
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
|
|
||||||
|
|
||||||
## "name-schema" configures tag names for parsing the metric name from graphite protocol;
|
|
||||||
## separated by `name-separator`.
|
|
||||||
## The "measurement" tag is special and the corresponding field will become
|
|
||||||
## the name of the metric.
|
|
||||||
## e.g. "type.host.measurement.device" will parse "server.localhost.cpu.cpu0" as
|
|
||||||
## {
|
|
||||||
## measurement: "cpu",
|
|
||||||
## tags: {
|
|
||||||
## "type": "server",
|
|
||||||
## "host": "localhost,
|
|
||||||
## "device": "cpu0"
|
|
||||||
## }
|
|
||||||
## }
|
|
||||||
# name-schema = "type.host.measurement.device"
|
|
||||||
|
|
||||||
## If set to true, when the input metric name has more fields than `name-schema` specified,
|
|
||||||
## the extra fields will be ignored.
|
|
||||||
## Otherwise an error will be logged and the metric rejected.
|
|
||||||
# ignore-unnamed = true
|
|
||||||
|
|
||||||
###
|
|
||||||
### [collectd]
|
|
||||||
###
|
|
||||||
### Controls the listener for collectd data.
|
|
||||||
###
|
|
||||||
|
|
||||||
[collectd]
|
|
||||||
enabled = false
|
|
||||||
# bind-address = ""
|
|
||||||
# database = ""
|
|
||||||
# typesdb = ""
|
|
||||||
|
|
||||||
# These next lines control how batching works. You should have this enabled
|
|
||||||
# otherwise you could get dropped metrics or poor performance. Batching
|
|
||||||
# will buffer points in memory if you have many coming in.
|
|
||||||
|
|
||||||
# batch-size = 1000 # will flush if this many points get buffered
|
|
||||||
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
|
|
||||||
|
|
||||||
###
|
|
||||||
### [opentsdb]
|
|
||||||
###
|
|
||||||
### Controls the listener for OpenTSDB data.
|
|
||||||
###
|
|
||||||
|
|
||||||
[opentsdb]
|
|
||||||
enabled = false
|
|
||||||
# bind-address = ""
|
|
||||||
# database = ""
|
|
||||||
# retention-policy = ""
|
|
||||||
|
|
||||||
###
|
|
||||||
### [[udp]]
|
|
||||||
###
|
|
||||||
### Controls the listeners for InfluxDB line protocol data via UDP.
|
|
||||||
###
|
|
||||||
|
|
||||||
[[udp]]
|
|
||||||
enabled = false
|
|
||||||
# bind-address = ""
|
|
||||||
# database = ""
|
|
||||||
|
|
||||||
# These next lines control how batching works. You should have this enabled
|
|
||||||
# otherwise you could get dropped metrics or poor performance. Batching
|
|
||||||
# will buffer points in memory if you have many coming in.
|
|
||||||
|
|
||||||
# batch-size = 1000 # will flush if this many points get buffered
|
|
||||||
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
|
|
||||||
|
|
||||||
###
|
|
||||||
### [monitoring]
|
|
||||||
###
|
|
||||||
|
|
||||||
[monitoring]
|
|
||||||
enabled = true
|
|
||||||
write-interval = "24h"
|
|
||||||
|
|
||||||
###
|
|
||||||
### [continuous_queries]
|
|
||||||
###
|
|
||||||
### Controls how continuous queries are run within InfluxDB.
|
|
||||||
###
|
|
||||||
|
|
||||||
[continuous_queries]
|
|
||||||
log-enabled = true
|
|
||||||
enabled = true
|
|
||||||
recompute-previous-n = 2
|
|
||||||
recompute-no-older-than = "10m"
|
|
||||||
compute-runs-per-interval = 10
|
|
||||||
compute-no-more-than = "2m"
|
|
||||||
|
|
||||||
###
|
|
||||||
### [hinted-handoff]
|
|
||||||
###
|
|
||||||
### Controls the hinted handoff feature, which allows nodes to temporarily
|
|
||||||
### store queued data when one node of a cluster is down for a short period
|
|
||||||
### of time.
|
|
||||||
###
|
|
||||||
|
|
||||||
[hinted-handoff]
|
|
||||||
enabled = true
|
|
||||||
dir = "/var/opt/influxdb/hh"
|
|
||||||
max-size = 1073741824
|
|
||||||
max-age = "168h"
|
|
||||||
retry-rate-limit = 0
|
|
||||||
retry-interval = "1s"
|
|
|
@ -1,186 +0,0 @@
|
||||||
# Import/Export
|
|
||||||
|
|
||||||
## Exporting from 0.8.9
|
|
||||||
|
|
||||||
Version `0.8.9` of InfluxDB adds support to export your data to a format that can be imported into `0.9.3` and later.
|
|
||||||
|
|
||||||
### Design
|
|
||||||
|
|
||||||
`0.8.9` exports raw data to a flat file that includes two sections, `DDL` and `DML`. You can choose to export them independently (see below).
|
|
||||||
|
|
||||||
The `DDL` section contains the sql commands to create databases and retention policies. the `DML` section is [line protocol](https://github.com/influxdb/influxdb/blob/master/tsdb/README.md) and can be directly posted to the [http endpoint](https://influxdb.com/docs/v0.9/guides/writing_data.html) in `0.9`. Remember that batching is important and we don't recommend batch sizes over 5k.
|
|
||||||
|
|
||||||
You need to specify a database and shard group when you export.
|
|
||||||
|
|
||||||
To list out your shards, use the following http endpoint:
|
|
||||||
|
|
||||||
`/cluster/shard_spaces`
|
|
||||||
|
|
||||||
example:
|
|
||||||
```sh
|
|
||||||
http://username:password@localhost:8086/cluster/shard_spaces
|
|
||||||
```
|
|
||||||
|
|
||||||
Then, to export a database with then name "metrics" and a shard space with the name "default", issue the following curl command:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
curl -o export http://username:password@http://localhost:8086/export/metrics/default
|
|
||||||
```
|
|
||||||
|
|
||||||
Compression is supported, and will result in a significantly smaller file size.
|
|
||||||
|
|
||||||
Use the following command for compression:
|
|
||||||
```sh
|
|
||||||
curl -o export.gz --compressed http://username:password@http://localhost:8086/export/metrics/default
|
|
||||||
```
|
|
||||||
|
|
||||||
You can also export just the `DDL` with this option:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
curl -o export.ddl http://username:password@http://localhost:8086/export/metrics/default?l=ddl
|
|
||||||
```
|
|
||||||
|
|
||||||
Or just the `DML` with this option:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
curl -o export.dml.gz --compressed http://username:password@http://localhost:8086/export/metrics/default?l=dml
|
|
||||||
```
|
|
||||||
|
|
||||||
### Assumptions
|
|
||||||
|
|
||||||
- Series name mapping follows these [guidelines](https://influxdb.com/docs/v0.8/advanced_topics/schema_design.html)
|
|
||||||
- Database name will map directly from `0.8` to `0.9`
|
|
||||||
- Shard Spaces map to Retention Policies
|
|
||||||
- Shard Space Duration is ignored, as in `0.9` we determine shard size automatically
|
|
||||||
- Regex is used to match the correct series names and only exports that data for the database
|
|
||||||
- Duration becomes the new Retention Policy duration
|
|
||||||
|
|
||||||
- Users are not migrated due to inability to get passwords. Anyone using users will need to manually set these back up in `0.9`
|
|
||||||
|
|
||||||
### Upgrade Recommendations
|
|
||||||
|
|
||||||
It's recommended that you upgrade to `0.9.3` first and have all your writes going there. Then, on the `0.8.X` instances, upgrade to `0.8.9`.
|
|
||||||
|
|
||||||
It is important that when exporting you change your config to allow for the http endpoints not timing out. To do so, make this change in your config:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
# Configure the http api
|
|
||||||
[api]
|
|
||||||
read-timeout = "0s"
|
|
||||||
```
|
|
||||||
|
|
||||||
### Exceptions
|
|
||||||
|
|
||||||
If a series can't be exported to tags based on the guidelines mentioned above,
|
|
||||||
we will insert the entire series name as the measurement name. You can either
|
|
||||||
allow that to import into the new InfluxDB instance, or you can do your own
|
|
||||||
data massage on it prior to importing it.
|
|
||||||
|
|
||||||
For example, if you have the following series name:
|
|
||||||
|
|
||||||
```
|
|
||||||
metric.disk.c.host.server01.single
|
|
||||||
```
|
|
||||||
|
|
||||||
It will export as exactly thta as the measurement name and no tags:
|
|
||||||
|
|
||||||
```
|
|
||||||
metric.disk.c.host.server01.single
|
|
||||||
```
|
|
||||||
|
|
||||||
### Export Metrics
|
|
||||||
|
|
||||||
When you export, you will now get comments inline in the `DML`:
|
|
||||||
|
|
||||||
`# Found 999 Series for export`
|
|
||||||
|
|
||||||
As well as count totals for each series exported:
|
|
||||||
|
|
||||||
`# Series FOO - Points Exported: 999`
|
|
||||||
|
|
||||||
With a total at the bottom:
|
|
||||||
|
|
||||||
`# Points Exported: 999`
|
|
||||||
|
|
||||||
You can grep the file that was exported at the end to get all the export metrics:
|
|
||||||
|
|
||||||
`cat myexport | grep Exported`
|
|
||||||
|
|
||||||
## Importing
|
|
||||||
|
|
||||||
Version `0.9.3` of InfluxDB adds support to import your data from version `0.8.9`.
|
|
||||||
|
|
||||||
## Caveats
|
|
||||||
|
|
||||||
For the export/import to work, all requisites have to be met. For export, all series names in `0.8` should be in the following format:
|
|
||||||
|
|
||||||
```
|
|
||||||
<tagName>.<tagValue>.<tagName>.<tagValue>.<measurement>
|
|
||||||
```
|
|
||||||
for example:
|
|
||||||
```
|
|
||||||
az.us-west-1.host.serverA.cpu
|
|
||||||
```
|
|
||||||
or any number of tags
|
|
||||||
```
|
|
||||||
building.2.temperature
|
|
||||||
```
|
|
||||||
|
|
||||||
Additionally, the fields need to have a consistent type (all float64, int64, etc) for every write in `0.8`. Otherwise they have the potential to fail writes in the import.
|
|
||||||
See below for more information.
|
|
||||||
|
|
||||||
## Running the import command
|
|
||||||
|
|
||||||
To import via the cli, you can specify the following command:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
influx -import -path=metrics-default.gz -compressed
|
|
||||||
```
|
|
||||||
|
|
||||||
If the file is not compressed you can issue it without the `-compressed` flag:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
influx -import -path=metrics-default
|
|
||||||
```
|
|
||||||
|
|
||||||
To redirect failed import lines to another file, run this command:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
influx -import -path=metrics-default.gz -compressed > failures
|
|
||||||
```
|
|
||||||
|
|
||||||
The import will use the line protocol in batches of 5,000 lines per batch when sending data to the server.
|
|
||||||
|
|
||||||
### Throttiling the import
|
|
||||||
|
|
||||||
If you need to throttle the import so the database has time to ingest, you can use the `-pps` flag. This will limit the points per second that will be sent to the server.
|
|
||||||
|
|
||||||
```sh
|
|
||||||
influx -import -path=metrics-default.gz -compressed -pps 50000 > failures
|
|
||||||
```
|
|
||||||
|
|
||||||
Which is stating that you don't want MORE than 50,000 points per second to write to the database. Due to the processing that is taking place however, you will likely never get exactly 50,000 pps, more like 35,000 pps, etc.
|
|
||||||
|
|
||||||
## Understanding the results of the import
|
|
||||||
|
|
||||||
During the import, a status message will write out for every 100,000 points imported and report stats on the progress of the import:
|
|
||||||
|
|
||||||
```
|
|
||||||
2015/08/21 14:48:01 Processed 3100000 lines. Time elapsed: 56.740578415s. Points per second (PPS): 54634
|
|
||||||
```
|
|
||||||
|
|
||||||
The batch will give some basic stats when finished:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
2015/07/29 23:15:20 Processed 2 commands
|
|
||||||
2015/07/29 23:15:20 Processed 70207923 inserts
|
|
||||||
2015/07/29 23:15:20 Failed 29785000 inserts
|
|
||||||
```
|
|
||||||
|
|
||||||
Most inserts fail due to the following types of error:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
2015/07/29 22:18:28 error writing batch: write failed: field type conflict: input field "value" on measurement "metric" is type float64, already exists as type integer
|
|
||||||
```
|
|
||||||
|
|
||||||
This is due to the fact that in `0.8` a field could get created and saved as int or float types for independent writes. In `0.9` the field has to have a consistent type.
|
|
|
@ -1,236 +0,0 @@
|
||||||
package v8
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"compress/gzip"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/client"
|
|
||||||
)
|
|
||||||
|
|
||||||
const batchSize = 5000
|
|
||||||
|
|
||||||
// Config is the config used to initialize a Importer importer
|
|
||||||
type Config struct {
|
|
||||||
Username string
|
|
||||||
Password string
|
|
||||||
URL url.URL
|
|
||||||
Precision string
|
|
||||||
WriteConsistency string
|
|
||||||
Path string
|
|
||||||
Version string
|
|
||||||
Compressed bool
|
|
||||||
PPS int
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewConfig returns an initialized *Config
|
|
||||||
func NewConfig() *Config {
|
|
||||||
return &Config{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Importer is the importer used for importing 0.8 data
|
|
||||||
type Importer struct {
|
|
||||||
client *client.Client
|
|
||||||
database string
|
|
||||||
retentionPolicy string
|
|
||||||
config *Config
|
|
||||||
batch []string
|
|
||||||
totalInserts int
|
|
||||||
failedInserts int
|
|
||||||
totalCommands int
|
|
||||||
throttlePointsWritten int
|
|
||||||
lastWrite time.Time
|
|
||||||
throttle *time.Ticker
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewImporter will return an intialized Importer struct
|
|
||||||
func NewImporter(config *Config) *Importer {
|
|
||||||
return &Importer{
|
|
||||||
config: config,
|
|
||||||
batch: make([]string, 0, batchSize),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Import processes the specified file in the Config and writes the data to the databases in chunks specified by batchSize
|
|
||||||
func (i *Importer) Import() error {
|
|
||||||
// Create a client and try to connect
|
|
||||||
config := client.NewConfig()
|
|
||||||
config.URL = i.config.URL
|
|
||||||
config.Username = i.config.Username
|
|
||||||
config.Password = i.config.Password
|
|
||||||
config.UserAgent = fmt.Sprintf("influxDB importer/%s", i.config.Version)
|
|
||||||
cl, err := client.NewClient(config)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("could not create client %s", err)
|
|
||||||
}
|
|
||||||
i.client = cl
|
|
||||||
if _, _, e := i.client.Ping(); e != nil {
|
|
||||||
return fmt.Errorf("failed to connect to %s\n", i.client.Addr())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate args
|
|
||||||
if i.config.Path == "" {
|
|
||||||
return fmt.Errorf("file argument required")
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if i.totalInserts > 0 {
|
|
||||||
log.Printf("Processed %d commands\n", i.totalCommands)
|
|
||||||
log.Printf("Processed %d inserts\n", i.totalInserts)
|
|
||||||
log.Printf("Failed %d inserts\n", i.failedInserts)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Open the file
|
|
||||||
f, err := os.Open(i.config.Path)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
var r io.Reader
|
|
||||||
|
|
||||||
// If gzipped, wrap in a gzip reader
|
|
||||||
if i.config.Compressed {
|
|
||||||
gr, err := gzip.NewReader(f)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer gr.Close()
|
|
||||||
// Set the reader to the gzip reader
|
|
||||||
r = gr
|
|
||||||
} else {
|
|
||||||
// Standard text file so our reader can just be the file
|
|
||||||
r = f
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get our reader
|
|
||||||
scanner := bufio.NewScanner(r)
|
|
||||||
|
|
||||||
// Process the DDL
|
|
||||||
i.processDDL(scanner)
|
|
||||||
|
|
||||||
// Set up our throttle channel. Since there is effectively no other activity at this point
|
|
||||||
// the smaller resolution gets us much closer to the requested PPS
|
|
||||||
i.throttle = time.NewTicker(time.Microsecond)
|
|
||||||
defer i.throttle.Stop()
|
|
||||||
|
|
||||||
// Prime the last write
|
|
||||||
i.lastWrite = time.Now()
|
|
||||||
|
|
||||||
// Process the DML
|
|
||||||
i.processDML(scanner)
|
|
||||||
|
|
||||||
// Check if we had any errors scanning the file
|
|
||||||
if err := scanner.Err(); err != nil {
|
|
||||||
return fmt.Errorf("reading standard input: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i *Importer) processDDL(scanner *bufio.Scanner) {
|
|
||||||
for scanner.Scan() {
|
|
||||||
line := scanner.Text()
|
|
||||||
// If we find the DML token, we are done with DDL
|
|
||||||
if strings.HasPrefix(line, "# DML") {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(line, "#") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
i.queryExecutor(line)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i *Importer) processDML(scanner *bufio.Scanner) {
|
|
||||||
start := time.Now()
|
|
||||||
for scanner.Scan() {
|
|
||||||
line := scanner.Text()
|
|
||||||
if strings.HasPrefix(line, "# CONTEXT-DATABASE:") {
|
|
||||||
i.database = strings.TrimSpace(strings.Split(line, ":")[1])
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(line, "# CONTEXT-RETENTION-POLICY:") {
|
|
||||||
i.retentionPolicy = strings.TrimSpace(strings.Split(line, ":")[1])
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(line, "#") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
i.batchAccumulator(line, start)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i *Importer) execute(command string) {
|
|
||||||
response, err := i.client.Query(client.Query{Command: command, Database: i.database})
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("error: %s\n", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err := response.Error(); err != nil {
|
|
||||||
log.Printf("error: %s\n", response.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i *Importer) queryExecutor(command string) {
|
|
||||||
i.totalCommands++
|
|
||||||
i.execute(command)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i *Importer) batchAccumulator(line string, start time.Time) {
|
|
||||||
i.batch = append(i.batch, line)
|
|
||||||
if len(i.batch) == batchSize {
|
|
||||||
if e := i.batchWrite(); e != nil {
|
|
||||||
log.Println("error writing batch: ", e)
|
|
||||||
// Output failed lines to STDOUT so users can capture lines that failed to import
|
|
||||||
fmt.Println(strings.Join(i.batch, "\n"))
|
|
||||||
i.failedInserts += len(i.batch)
|
|
||||||
} else {
|
|
||||||
i.totalInserts += len(i.batch)
|
|
||||||
}
|
|
||||||
i.batch = i.batch[:0]
|
|
||||||
// Give some status feedback every 100000 lines processed
|
|
||||||
processed := i.totalInserts + i.failedInserts
|
|
||||||
if processed%100000 == 0 {
|
|
||||||
since := time.Since(start)
|
|
||||||
pps := float64(processed) / since.Seconds()
|
|
||||||
log.Printf("Processed %d lines. Time elapsed: %s. Points per second (PPS): %d", processed, since.String(), int64(pps))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i *Importer) batchWrite() error {
|
|
||||||
// Accumulate the batch size to see how many points we have written this second
|
|
||||||
i.throttlePointsWritten += len(i.batch)
|
|
||||||
|
|
||||||
// Find out when we last wrote data
|
|
||||||
since := time.Since(i.lastWrite)
|
|
||||||
|
|
||||||
// Check to see if we've exceeded our points per second for the current timeframe
|
|
||||||
var currentPPS int
|
|
||||||
if since.Seconds() > 0 {
|
|
||||||
currentPPS = int(float64(i.throttlePointsWritten) / since.Seconds())
|
|
||||||
} else {
|
|
||||||
currentPPS = i.throttlePointsWritten
|
|
||||||
}
|
|
||||||
|
|
||||||
// If our currentPPS is greater than the PPS specified, then we wait and retry
|
|
||||||
if int(currentPPS) > i.config.PPS && i.config.PPS != 0 {
|
|
||||||
// Wait for the next tick
|
|
||||||
<-i.throttle.C
|
|
||||||
|
|
||||||
// Decrement the batch size back out as it is going to get called again
|
|
||||||
i.throttlePointsWritten -= len(i.batch)
|
|
||||||
return i.batchWrite()
|
|
||||||
}
|
|
||||||
|
|
||||||
_, e := i.client.WriteLineProtocol(strings.Join(i.batch, "\n"), i.database, i.retentionPolicy, i.config.Precision, i.config.WriteConsistency)
|
|
||||||
i.throttlePointsWritten = 0
|
|
||||||
i.lastWrite = time.Now()
|
|
||||||
return e
|
|
||||||
}
|
|
|
@ -1,14 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
|
|
||||||
REPO_DIR=`mktemp -d`
|
|
||||||
echo "Using $REPO_DIR for all work..."
|
|
||||||
|
|
||||||
cd $REPO_DIR
|
|
||||||
export GOPATH=`pwd`
|
|
||||||
mkdir -p $GOPATH/src/github.com/influxdb
|
|
||||||
cd $GOPATH/src/github.com/influxdb
|
|
||||||
git clone https://github.com/influxdb/influxdb.git
|
|
||||||
|
|
||||||
cd $GOPATH/src/github.com/influxdb/influxdb
|
|
||||||
NIGHTLY_BUILD=true ./package.sh 0.9.3-nightly-`git log --pretty=format:'%h' -n 1`
|
|
||||||
rm -rf $REPO_DIR
|
|
|
@ -1,409 +0,0 @@
|
||||||
#!/usr/bin/env bash

###########################################################################
# Packaging script which creates debian and RPM packages. It optionally
# tags the repo with the given version.
#
# Requirements: GOPATH must be set. 'fpm' must be on the path, and the AWS
# CLI tools must also be installed.
#
# https://github.com/jordansissel/fpm
# http://aws.amazon.com/cli/
#
# Packaging process: to package a build, simple execute:
#
#    package.sh <version>
#
# where <version> is the desired version. If generation of a debian and RPM
# package is successful, the script will offer to tag the repo using the
# supplied version string.
#
# AWS upload: the script will also offer to upload the packages to S3. If
# this option is selected, the credentials should be present in the file
# ~/aws.conf. The contents should be of the form:
#
#    [default]
#    aws_access_key_id=<access ID>
#    aws_secret_access_key=<secret key>
#    region = us-east-1
#
# Trim the leading spaces when creating the file. The script will exit if
# S3 upload is requested, but this file does not exist.

# Setting DEBUG (to any value) traces every command executed.
[ -z $DEBUG ] || set -x

AWS_FILE=~/aws.conf

# Target filesystem layout baked into the packages.
INSTALL_ROOT_DIR=/opt/influxdb
INFLUXDB_LOG_DIR=/var/log/influxdb
INFLUXDB_DATA_DIR=/var/opt/influxdb
CONFIG_ROOT_DIR=/etc/opt/influxdb

SAMPLE_CONFIGURATION=etc/config.sample.toml
INITD_SCRIPT=scripts/init.sh

# Scratch resources; removed again by cleanup_exit.
TMP_WORK_DIR=`mktemp -d`
POST_INSTALL_PATH=`mktemp`
ARCH=`uname -i`
LICENSE=MIT
URL=influxdb.com
MAINTAINER=support@influxdb.com
VENDOR=Influxdb
DESCRIPTION="Distributed time-series database"

# Allow path to FPM to be set by environment variables. Some execution contexts
# like cron don't have PATH set correctly to pick it up.
if [ -z "$FPM" ]; then
    FPM=`which fpm`
fi

GO_VERSION="go1.4.2"
GOPATH_INSTALL=
# Binaries shipped in the package.
BINS=(
    influxd
    influx
    )

###########################################################################
# Helper functions.

# usage prints simple usage information.
usage() {
    echo -e "$0 [<version>] [-h]\n"
    cleanup_exit $1
}

# cleanup_exit removes all resources created during the process and exits with
# the supplied returned code.
cleanup_exit() {
    rm -r $TMP_WORK_DIR
    rm $POST_INSTALL_PATH
    exit $1
}

# current_branch echos the current git branch.
current_branch() {
    echo `git rev-parse --abbrev-ref HEAD`
}

# check_gopath sanity checks the value of the GOPATH env variable, and determines
# the path where build artifacts are installed. GOPATH may be a colon-delimited
# list of directories; only the first entry is used for installation.
check_gopath() {
    [ -z "$GOPATH" ] && echo "GOPATH is not set." && cleanup_exit 1
    GOPATH_INSTALL=`echo $GOPATH | cut -d ':' -f 1`
    [ ! -d "$GOPATH_INSTALL" ] && echo "GOPATH_INSTALL is not a directory." && cleanup_exit 1
    echo "GOPATH ($GOPATH) looks sane, using $GOPATH_INSTALL for installation."
}

# check_gvm verifies gvm is installed and switches to the pinned Go version,
# preserving any GOPATH that was set before gvm sourcing overwrote it.
check_gvm() {
    if [ -n "$GOPATH" ]; then
        existing_gopath=$GOPATH
    fi

    source $HOME/.gvm/scripts/gvm
    which gvm
    if [ $? -ne 0 ]; then
        echo "gvm not found -- aborting."
        cleanup_exit $1
    fi
    gvm use $GO_VERSION
    if [ $? -ne 0 ]; then
        echo "gvm cannot find Go version $GO_VERSION -- aborting."
        cleanup_exit $1
    fi

    # Keep any existing GOPATH set.
    if [ -n "$existing_gopath" ]; then
        GOPATH=$existing_gopath
    fi
}

# check_clean_tree ensures that no source file is locally modified.
check_clean_tree() {
    modified=$(git ls-files --modified | wc -l)
    if [ $modified -ne 0 ]; then
        echo "The source tree is not clean -- aborting."
        cleanup_exit 1
    fi
    echo "Git tree is clean."
}

# update_tree ensures the tree is in-sync with the repo.
update_tree() {
    git pull origin $TARGET_BRANCH
    if [ $? -ne 0 ]; then
        echo "Failed to pull latest code -- aborting."
        cleanup_exit 1
    fi
    git fetch --tags
    if [ $? -ne 0 ]; then
        echo "Failed to fetch tags -- aborting."
        cleanup_exit 1
    fi
    echo "Git tree updated successfully."
}

# check_tag_exists checks if the existing release already exists in the tags.
check_tag_exists () {
    version=$1
    git tag | grep -q "^v$version$"
    if [ $? -eq 0 ]; then
        echo "Proposed version $version already exists as a tag -- aborting."
        cleanup_exit 1
    fi
}

# make_dir_tree creates the directory structure within the packages.
make_dir_tree() {
    work_dir=$1
    version=$2
    mkdir -p $work_dir/$INSTALL_ROOT_DIR/versions/$version/scripts
    if [ $? -ne 0 ]; then
        echo "Failed to create installation directory -- aborting."
        cleanup_exit 1
    fi
    mkdir -p $work_dir/$CONFIG_ROOT_DIR
    if [ $? -ne 0 ]; then
        echo "Failed to create configuration directory -- aborting."
        cleanup_exit 1
    fi
}


# do_build builds the code. The version and commit must be passed in.
do_build() {
    # Remove stale binaries so a failed install cannot ship old artifacts.
    for b in ${BINS[*]}; do
        rm -f $GOPATH_INSTALL/bin/$b
    done
    go get -u -f -d ./...
    if [ $? -ne 0 ]; then
        echo "WARNING: failed to 'go get' packages."
    fi

    git checkout $TARGET_BRANCH # go get switches to master, so ensure we're back.
    version=$1
    commit=`git rev-parse HEAD`
    branch=`current_branch`
    if [ $? -ne 0 ]; then
        echo "Unable to retrieve current commit -- aborting"
        cleanup_exit 1
    fi

    # Embed version metadata into the binaries via linker flags.
    go install -a -ldflags="-X main.version $version -X main.branch $branch -X main.commit $commit" ./...
    if [ $? -ne 0 ]; then
        echo "Build failed, unable to create package -- aborting"
        cleanup_exit 1
    fi
    echo "Build completed successfully."
}

# generate_postinstall_script creates the post-install script for the
# package. It must be passed the version. The heredoc below is the literal
# content of the generated script (variables expand at generation time).
generate_postinstall_script() {
    version=$1
    cat <<EOF >$POST_INSTALL_PATH
rm -f $INSTALL_ROOT_DIR/influxd
rm -f $INSTALL_ROOT_DIR/influx
rm -f $INSTALL_ROOT_DIR/init.sh
ln -s $INSTALL_ROOT_DIR/versions/$version/influxd $INSTALL_ROOT_DIR/influxd
ln -s $INSTALL_ROOT_DIR/versions/$version/influx $INSTALL_ROOT_DIR/influx
ln -s $INSTALL_ROOT_DIR/versions/$version/scripts/init.sh $INSTALL_ROOT_DIR/init.sh

rm -f /etc/init.d/influxdb
ln -sfn $INSTALL_ROOT_DIR/init.sh /etc/init.d/influxdb
chmod +x /etc/init.d/influxdb
if which update-rc.d > /dev/null 2>&1 ; then
    update-rc.d -f influxdb remove
    update-rc.d influxdb defaults
else
    chkconfig --add influxdb
fi

if ! id influxdb >/dev/null 2>&1; then
    useradd --system -U -M influxdb
fi
chown -R -L influxdb:influxdb $INSTALL_ROOT_DIR
chmod -R a+rX $INSTALL_ROOT_DIR

mkdir -p $INFLUXDB_LOG_DIR
chown -R -L influxdb:influxdb $INFLUXDB_LOG_DIR
mkdir -p $INFLUXDB_DATA_DIR
chown -R -L influxdb:influxdb $INFLUXDB_DATA_DIR
EOF
    echo "Post-install script created successfully at $POST_INSTALL_PATH"
}

###########################################################################
# Start the packaging process.

if [ $# -ne 1 ]; then
    usage 1
elif [ $1 == "-h" ]; then
    usage 0
else
    VERSION=$1
    VERSION_UNDERSCORED=`echo "$VERSION" | tr - _`
fi

echo -e "\nStarting package process...\n"

# Ensure the current is correct.
TARGET_BRANCH=`current_branch`
# NIGHTLY_BUILD (any non-empty value) suppresses all interactive prompts.
if [ -z "$NIGHTLY_BUILD" ]; then
    echo -n "Current branch is $TARGET_BRANCH. Start packaging this branch? [Y/n] "
    read response
    response=`echo $response | tr 'A-Z' 'a-z'`
    if [ "x$response" == "xn" ]; then
        echo "Packaging aborted."
        cleanup_exit 1
    fi
fi

check_gvm
check_gopath
if [ -z "$NIGHTLY_BUILD" ]; then
    check_clean_tree
    update_tree
    check_tag_exists $VERSION
fi

do_build $VERSION
make_dir_tree $TMP_WORK_DIR $VERSION

###########################################################################
# Copy the assets to the installation directories.

for b in ${BINS[*]}; do
    cp $GOPATH_INSTALL/bin/$b $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION
    if [ $? -ne 0 ]; then
        echo "Failed to copy binaries to packaging directory -- aborting."
        cleanup_exit 1
    fi
done
echo "${BINS[*]} copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION"

cp $INITD_SCRIPT $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts
if [ $? -ne 0 ]; then
    echo "Failed to copy init.d script to packaging directory -- aborting."
    cleanup_exit 1
fi
echo "$INITD_SCRIPT copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts"

cp $SAMPLE_CONFIGURATION $TMP_WORK_DIR/$CONFIG_ROOT_DIR/influxdb.conf
if [ $? -ne 0 ]; then
    echo "Failed to copy $SAMPLE_CONFIGURATION to packaging directory -- aborting."
    cleanup_exit 1
fi

generate_postinstall_script $VERSION

###########################################################################
# Create the actual packages.

if [ -z "$NIGHTLY_BUILD" ]; then
    echo -n "Commence creation of $ARCH packages, version $VERSION? [Y/n] "
    read response
    response=`echo $response | tr 'A-Z' 'a-z'`
    if [ "x$response" == "xn" ]; then
        echo "Packaging aborted."
        cleanup_exit 1
    fi
fi

# Pick per-architecture package names and fpm/setarch arguments.
if [ $ARCH == "i386" ]; then
    rpm_package=influxdb-${VERSION}-1.i686.rpm # RPM packages use 1 for default package release.
    debian_package=influxdb_${VERSION}_i686.deb
    deb_args="-a i686"
    rpm_args="setarch i686"
elif [ $ARCH == "arm" ]; then
    rpm_package=influxdb-${VERSION}-1.armel.rpm
    debian_package=influxdb_${VERSION}_armel.deb
else
    rpm_package=influxdb-${VERSION}-1.x86_64.rpm
    debian_package=influxdb_${VERSION}_amd64.deb
fi

COMMON_FPM_ARGS="-C $TMP_WORK_DIR --vendor $VENDOR --url $URL --license $LICENSE --maintainer $MAINTAINER --after-install $POST_INSTALL_PATH --name influxdb --version $VERSION --config-files $CONFIG_ROOT_DIR ."
$rpm_args $FPM -s dir -t rpm --description "$DESCRIPTION" $COMMON_FPM_ARGS
if [ $? -ne 0 ]; then
    echo "Failed to create RPM package -- aborting."
    cleanup_exit 1
fi
echo "RPM package created successfully."

$FPM -s dir -t deb $deb_args --description "$DESCRIPTION" $COMMON_FPM_ARGS
if [ $? -ne 0 ]; then
    echo "Failed to create Debian package -- aborting."
    cleanup_exit 1
fi
echo "Debian package created successfully."

$FPM -s dir -t tar --prefix influxdb_${VERSION}_${ARCH} -p influxdb_${VERSION}_${ARCH}.tar.gz --description "$DESCRIPTION" $COMMON_FPM_ARGS
if [ $? -ne 0 ]; then
    echo "Failed to create Tar package -- aborting."
    cleanup_exit 1
fi
echo "Tar package created successfully."

###########################################################################
# Offer to tag the repo.

if [ -z "$NIGHTLY_BUILD" ]; then
    echo -n "Tag source tree with v$VERSION and push to repo? [y/N] "
    read response
    response=`echo $response | tr 'A-Z' 'a-z'`
    if [ "x$response" == "xy" ]; then
        echo "Creating tag v$VERSION and pushing to repo"
        git tag v$VERSION
        if [ $? -ne 0 ]; then
            echo "Failed to create tag v$VERSION -- aborting"
            cleanup_exit 1
        fi
        git push origin v$VERSION
        if [ $? -ne 0 ]; then
            echo "Failed to push tag v$VERSION to repo -- aborting"
            cleanup_exit 1
        fi
    else
        echo "Not creating tag v$VERSION."
    fi
fi

###########################################################################
# Offer to publish the packages.

if [ -z "$NIGHTLY_BUILD" ]; then
    echo -n "Publish packages to S3? [y/N] "
    read response
    response=`echo $response | tr 'A-Z' 'a-z'`
fi

# Nightly builds always publish; interactive runs only on explicit "y".
if [ "x$response" == "xy" -o -n "$NIGHTLY_BUILD" ]; then
    echo "Publishing packages to S3."
    if [ ! -e "$AWS_FILE" ]; then
        echo "$AWS_FILE does not exist -- aborting."
        cleanup_exit 1
    fi

    for filepath in `ls *.{deb,rpm,gz}`; do
        filename=`basename $filepath`
        # Nightly artifacts are renamed so the S3 key is stable across days.
        if [ -n "$NIGHTLY_BUILD" ]; then
            filename=`echo $filename | sed s/$VERSION/nightly/`
            filename=`echo $filename | sed s/$VERSION_UNDERSCORED/nightly/`
        fi
        AWS_CONFIG_FILE=$AWS_FILE aws s3 cp $filepath s3://influxdb/$filename --acl public-read --region us-east-1
        if [ $? -ne 0 ]; then
            echo "Upload failed -- aborting".
            cleanup_exit 1
        fi
    done
else
    echo "Not publishing packages to S3."
fi

###########################################################################
# All done.

echo -e "\nPackaging process complete."
cleanup_exit 0
|
|
|
@ -1,16 +0,0 @@
|
||||||
# systemd unit for the InfluxDB daemon.
# If you modify this, please also make sure to edit init.sh

[Unit]
Description=InfluxDB is an open-source, distributed, time series database
After=network.target

[Service]
# Run unprivileged as the package-created influxdb user/group.
User=influxdb
Group=influxdb
LimitNOFILE=65536
# Leading '-' means a missing environment file is not an error.
EnvironmentFile=-/etc/default/influxdb
# $INFLUXD_OPTS comes from the environment file above, if present.
ExecStart=/opt/influxdb/influxd -config /etc/opt/influxdb/influxdb.conf $INFLUXD_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
|
|
|
@ -1,205 +0,0 @@
|
||||||
#! /usr/bin/env bash
#
# SysV init script for the InfluxDB daemon (influxd).

### BEGIN INIT INFO
# Provides: influxd
# Required-Start: $all
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start influxd at boot time
### END INIT INFO

# If you modify this, please make sure to also edit influxdb.service
# this init script supports three different variations:
#   1. New lsb that define start-stop-daemon
#   2. Old lsb that don't have start-stop-daemon but define, log, pidofproc and killproc
#   3. Centos installations without lsb-core installed
#
# In the third case we have to define our own functions which are very dumb
# and expect the args to be positioned correctly.

# Command-line options that can be set in /etc/default/influxdb. These will override
# any config file values. Example: "-join http://1.2.3.4:8086"
DEFAULT=/etc/default/influxdb

# Daemon options
INFLUXD_OPTS=

# Process name ( For display )
NAME=influxdb

# User and group
USER=influxdb
GROUP=influxdb

# Daemon name, where is the actual executable
# If the daemon is not there, then exit.
DAEMON=/opt/influxdb/influxd
[ -x $DAEMON ] || exit 5

# Configuration file
CONFIG=/etc/opt/influxdb/influxdb.conf

# PID file for the daemon
PIDFILE=/var/run/influxdb/influxd.pid
PIDDIR=`dirname $PIDFILE`
if [ ! -d "$PIDDIR" ]; then
    mkdir -p $PIDDIR
    chown $GROUP:$USER $PIDDIR
fi

# Max open files
OPEN_FILE_LIMIT=65536

# Prefer the distribution's LSB helper functions when available; the
# fallbacks below are only used when this file is absent (e.g. CentOS
# without lsb-core).
if [ -r /lib/lsb/init-functions ]; then
    source /lib/lsb/init-functions
fi

# Logging destinations; both may be overridden from the environment.
if [ -z "$STDOUT" ]; then
    STDOUT=/dev/null
fi

if [ ! -f "$STDOUT" ]; then
    mkdir -p $(dirname $STDOUT)
fi

if [ -z "$STDERR" ]; then
    STDERR=/var/log/influxdb/influxd.log
fi

if [ ! -f "$STDERR" ]; then
    mkdir -p $(dirname $STDERR)
fi

# Overwrite init script variables with /etc/default/influxdb values
if [ -r $DEFAULT ]; then
    source $DEFAULT
fi

# Fallback pidofproc: succeeds (returns 0) only when the PID stored in the
# pidfile ($2) matches a live process found by pgrep on the daemon name ($3).
function pidofproc() {
    if [ $# -ne 3 ]; then
        echo "Expected three arguments, e.g. $0 -p pidfile daemon-name"
    fi

    PID=`pgrep -f $3`
    local PIDFILE=`cat $2`

    if [ "x$PIDFILE" == "x" ]; then
        return 1
    fi

    if [ "x$PID" != "x" -a "$PIDFILE" == "$PID" ]; then
        return 0
    fi

    return 1
}

# Fallback killproc: sends signal $3 to the PID read from pidfile $2.
function killproc() {
    if [ $# -ne 3 ]; then
        echo "Expected three arguments, e.g. $0 -p pidfile signal"
    fi

    PID=`cat $2`

    kill -s $3 $PID
}

function log_failure_msg() {
    echo "$@" "[ FAILED ]"
}

function log_success_msg() {
    echo "$@" "[ OK ]"
}

case $1 in
    start)
        # Check if config file exist
        if [ ! -r $CONFIG ]; then
            log_failure_msg "config file doesn't exists"
            exit 4
        fi

        # Checked the PID file exists and check the actual status of process
        if [ -e $PIDFILE ]; then
            pidofproc -p $PIDFILE $DAEMON > /dev/null 2>&1 && STATUS="0" || STATUS="$?"
            # If the status is SUCCESS then don't need to start again.
            if [ "x$STATUS" = "x0" ]; then
                log_failure_msg "$NAME process is running"
                exit 0 # Exit
            fi
        # if PID file does not exist, check if writable
        else
            su -c "touch $PIDFILE" $USER > /dev/null 2>&1
            if [ $? -ne 0 ]; then
                log_failure_msg "$PIDFILE not writable, check permissions"
                exit 5
            fi
        fi

        # Bump the file limits, before launching the daemon. These will carry over to
        # launched processes.
        ulimit -n $OPEN_FILE_LIMIT
        if [ $? -ne 0 ]; then
            log_failure_msg "set open file limit to $OPEN_FILE_LIMIT"
            exit 1
        fi

        log_success_msg "Starting the process" "$NAME"
        if which start-stop-daemon > /dev/null 2>&1; then
            start-stop-daemon --chuid $GROUP:$USER --start --quiet --pidfile $PIDFILE --exec $DAEMON -- -pidfile $PIDFILE -config $CONFIG $INFLUXD_OPTS >>$STDOUT 2>>$STDERR &
        else
            nohup $DAEMON -pidfile $PIDFILE -config $CONFIG $INFLUXD_OPTS >>$STDOUT 2>>$STDERR &
        fi
        log_success_msg "$NAME process was started"
        ;;

    stop)
        # Stop the daemon.
        if [ -e $PIDFILE ]; then
            pidofproc -p $PIDFILE $DAEMON > /dev/null 2>&1 && STATUS="0" || STATUS="$?"
            if [ "$STATUS" = 0 ]; then
                if killproc -p $PIDFILE SIGTERM && /bin/rm -rf $PIDFILE; then
                    log_success_msg "$NAME process was stopped"
                else
                    log_failure_msg "$NAME failed to stop service"
                fi
            fi
        else
            log_failure_msg "$NAME process is not running"
        fi
        ;;

    restart)
        # Restart the daemon.
        $0 stop && sleep 2 && $0 start
        ;;

    status)
        # Check the status of the process.
        if [ -e $PIDFILE ]; then
            if pidofproc -p $PIDFILE $DAEMON > /dev/null; then
                log_success_msg "$NAME Process is running"
                exit 0
            else
                log_failure_msg "$NAME Process is not running"
                exit 1
            fi
        else
            log_failure_msg "$NAME Process is not running"
            exit 3
        fi
        ;;

    version)
        $DAEMON version
        ;;

    *)
        # For invalid arguments, print the usage message.
        echo "Usage: $0 {start|stop|restart|status|version}"
        exit 2
        ;;
esac
|
|
|
@ -1,21 +0,0 @@
|
||||||
// Package admin provides the built-in web admin interface service.
package admin

const (
	// DefaultBindAddress is the default bind address for the HTTP server.
	DefaultBindAddress = ":8083"
)

// Config represents the configuration for the admin service,
// as decoded from the corresponding TOML section.
type Config struct {
	// Enabled determines whether the admin service is started.
	Enabled bool `toml:"enabled"`
	// BindAddress is the network address the HTTP(S) server listens on.
	BindAddress string `toml:"bind-address"`
	// HttpsEnabled switches the listener from plain HTTP to TLS.
	HttpsEnabled bool `toml:"https-enabled"`
	// HttpsCertificate is the path to the PEM file used for TLS.
	HttpsCertificate string `toml:"https-certificate"`
}

// NewConfig returns a Config with default values
// (HTTP on DefaultBindAddress; Enabled is left false).
func NewConfig() Config {
	return Config{
		BindAddress:      DefaultBindAddress,
		HttpsEnabled:     false,
		HttpsCertificate: "/etc/ssl/influxdb.pem",
	}
}
|
|
32
Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/config_test.go
generated
vendored
32
Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/config_test.go
generated
vendored
|
@ -1,32 +0,0 @@
|
||||||
package admin_test

import (
	"testing"

	"github.com/BurntSushi/toml"
	"github.com/influxdb/influxdb/services/admin"
)

// TestConfig_Parse ensures every admin.Config field is decoded from its
// TOML key correctly.
func TestConfig_Parse(t *testing.T) {
	// Parse configuration.
	var c admin.Config
	if _, err := toml.Decode(`
enabled = true
bind-address = ":8083"
https-enabled = true
https-certificate = "/dev/null"
`, &c); err != nil {
		t.Fatal(err)
	}

	// Validate configuration.
	if c.Enabled != true {
		t.Fatalf("unexpected enabled: %v", c.Enabled)
	} else if c.BindAddress != ":8083" {
		t.Fatalf("unexpected bind address: %s", c.BindAddress)
	} else if c.HttpsEnabled != true {
		t.Fatalf("unexpected https enabled: %v", c.HttpsEnabled)
	} else if c.HttpsCertificate != "/dev/null" {
		t.Fatalf("unexpected https certificate: %v", c.HttpsCertificate)
	}
}
|
|
|
@ -1,111 +0,0 @@
|
||||||
package admin

import (
	"crypto/tls"
	"fmt"
	"log"
	"net"
	"net/http"
	"os"
	"strings"

	// Register static assets via statik.
	_ "github.com/influxdb/influxdb/statik"
	"github.com/rakyll/statik/fs"
)

// Service manages the listener for an admin endpoint.
type Service struct {
	listener net.Listener // active listener; nil until Open succeeds
	addr     string       // bind address from Config.BindAddress
	https    bool         // serve TLS when true
	cert     string       // path to PEM file holding both certificate and key
	err      chan error   // fatal listener errors surface here (see Err)

	logger *log.Logger
}

// NewService returns a new instance of Service.
func NewService(c Config) *Service {
	return &Service{
		addr:  c.BindAddress,
		https: c.HttpsEnabled,
		cert:  c.HttpsCertificate,
		err:   make(chan error),
		// Default logger writes to stderr; replaceable via SetLogger.
		logger: log.New(os.Stderr, "[admin] ", log.LstdFlags),
	}
}

// Open starts the service: it binds the listener (TLS or plain TCP)
// and begins serving in a background goroutine.
func (s *Service) Open() error {
	s.logger.Printf("Starting admin service")

	// Open listener.
	if s.https {
		// NOTE: the same file is used for both certificate and key.
		cert, err := tls.LoadX509KeyPair(s.cert, s.cert)
		if err != nil {
			return err
		}

		listener, err := tls.Listen("tcp", s.addr, &tls.Config{
			Certificates: []tls.Certificate{cert},
		})
		if err != nil {
			return err
		}

		s.logger.Println("Listening on HTTPS:", listener.Addr().String())
		s.listener = listener
	} else {
		listener, err := net.Listen("tcp", s.addr)
		if err != nil {
			return err
		}

		s.logger.Println("Listening on HTTP:", listener.Addr().String())
		s.listener = listener
	}

	// Begin listening for requests in a separate goroutine.
	go s.serve()
	return nil
}

// Close closes the underlying listener.
func (s *Service) Close() error {
	if s.listener != nil {
		return s.listener.Close()
	}
	return nil
}

// SetLogger sets the internal logger to the logger passed in.
func (s *Service) SetLogger(l *log.Logger) {
	s.logger = l
}

// Err returns a channel for fatal errors that occur on the listener.
func (s *Service) Err() <-chan error { return s.err }

// Addr returns the listener's address. Returns nil if listener is closed.
func (s *Service) Addr() net.Addr {
	if s.listener != nil {
		return s.listener.Addr()
	}
	return nil
}

// serve serves the handler from the listener. It runs until the listener
// is closed; errors other than listener shutdown are sent on s.err.
func (s *Service) serve() {
	// Instantiate file system from embedded admin.
	// Panics if the statik assets were not compiled in — a build error,
	// not a runtime condition.
	statikFS, err := fs.New()
	if err != nil {
		panic(err)
	}

	// Run file system handler on listener.
	// The "closed" substring check suppresses the expected error raised
	// when Close() shuts the listener down.
	err = http.Serve(s.listener, http.FileServer(statikFS))
	if err != nil && !strings.Contains(err.Error(), "closed") {
		s.err <- fmt.Errorf("listener error: addr=%s, err=%s", s.Addr(), err)
	}
}
|
|
33
Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/service_test.go
generated
vendored
33
Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/service_test.go
generated
vendored
|
@ -1,33 +0,0 @@
|
||||||
package admin_test

import (
	"io/ioutil"
	"net/http"
	"testing"

	"github.com/influxdb/influxdb/services/admin"
)

// Ensure service can serve the root index page of the admin.
func TestService_Index(t *testing.T) {
	// Start service on random port (":0" lets the OS pick a free port).
	s := admin.NewService(admin.Config{BindAddress: "127.0.0.1:0"})
	if err := s.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	// Request root index page.
	resp, err := http.Get("http://" + s.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	// Validate status code and body.
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("unexpected status: %d", resp.StatusCode)
	} else if _, err := ioutil.ReadAll(resp.Body); err != nil {
		t.Fatalf("unable to read body: %s", err)
	}
}
|
|
209
Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/collectd_test.conf
generated
vendored
209
Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/collectd_test.conf
generated
vendored
|
@ -1,209 +0,0 @@
|
||||||
absolute value:ABSOLUTE:0:U
|
|
||||||
apache_bytes value:DERIVE:0:U
|
|
||||||
apache_connections value:GAUGE:0:65535
|
|
||||||
apache_idle_workers value:GAUGE:0:65535
|
|
||||||
apache_requests value:DERIVE:0:U
|
|
||||||
apache_scoreboard value:GAUGE:0:65535
|
|
||||||
ath_nodes value:GAUGE:0:65535
|
|
||||||
ath_stat value:DERIVE:0:U
|
|
||||||
backends value:GAUGE:0:65535
|
|
||||||
bitrate value:GAUGE:0:4294967295
|
|
||||||
bytes value:GAUGE:0:U
|
|
||||||
cache_eviction value:DERIVE:0:U
|
|
||||||
cache_operation value:DERIVE:0:U
|
|
||||||
cache_ratio value:GAUGE:0:100
|
|
||||||
cache_result value:DERIVE:0:U
|
|
||||||
cache_size value:GAUGE:0:U
|
|
||||||
charge value:GAUGE:0:U
|
|
||||||
compression_ratio value:GAUGE:0:2
|
|
||||||
compression uncompressed:DERIVE:0:U, compressed:DERIVE:0:U
|
|
||||||
connections value:DERIVE:0:U
|
|
||||||
conntrack value:GAUGE:0:4294967295
|
|
||||||
contextswitch value:DERIVE:0:U
|
|
||||||
counter value:COUNTER:U:U
|
|
||||||
cpufreq value:GAUGE:0:U
|
|
||||||
cpu value:DERIVE:0:U
|
|
||||||
current_connections value:GAUGE:0:U
|
|
||||||
current_sessions value:GAUGE:0:U
|
|
||||||
current value:GAUGE:U:U
|
|
||||||
delay value:GAUGE:-1000000:1000000
|
|
||||||
derive value:DERIVE:0:U
|
|
||||||
df_complex value:GAUGE:0:U
|
|
||||||
df_inodes value:GAUGE:0:U
|
|
||||||
df used:GAUGE:0:1125899906842623, free:GAUGE:0:1125899906842623
|
|
||||||
disk_latency read:GAUGE:0:U, write:GAUGE:0:U
|
|
||||||
disk_merged read:DERIVE:0:U, write:DERIVE:0:U
|
|
||||||
disk_octets read:DERIVE:0:U, write:DERIVE:0:U
|
|
||||||
disk_ops_complex value:DERIVE:0:U
|
|
||||||
disk_ops read:DERIVE:0:U, write:DERIVE:0:U
|
|
||||||
disk_time read:DERIVE:0:U, write:DERIVE:0:U
|
|
||||||
dns_answer value:DERIVE:0:U
|
|
||||||
dns_notify value:DERIVE:0:U
|
|
||||||
dns_octets queries:DERIVE:0:U, responses:DERIVE:0:U
|
|
||||||
dns_opcode value:DERIVE:0:U
|
|
||||||
dns_qtype_cached value:GAUGE:0:4294967295
|
|
||||||
dns_qtype value:DERIVE:0:U
|
|
||||||
dns_query value:DERIVE:0:U
|
|
||||||
dns_question value:DERIVE:0:U
|
|
||||||
dns_rcode value:DERIVE:0:U
|
|
||||||
dns_reject value:DERIVE:0:U
|
|
||||||
dns_request value:DERIVE:0:U
|
|
||||||
dns_resolver value:DERIVE:0:U
|
|
||||||
dns_response value:DERIVE:0:U
|
|
||||||
dns_transfer value:DERIVE:0:U
|
|
||||||
dns_update value:DERIVE:0:U
|
|
||||||
dns_zops value:DERIVE:0:U
|
|
||||||
duration seconds:GAUGE:0:U
|
|
||||||
email_check value:GAUGE:0:U
|
|
||||||
email_count value:GAUGE:0:U
|
|
||||||
email_size value:GAUGE:0:U
|
|
||||||
entropy value:GAUGE:0:4294967295
|
|
||||||
fanspeed value:GAUGE:0:U
|
|
||||||
file_size value:GAUGE:0:U
|
|
||||||
files value:GAUGE:0:U
|
|
||||||
flow value:GAUGE:0:U
|
|
||||||
fork_rate value:DERIVE:0:U
|
|
||||||
frequency_offset value:GAUGE:-1000000:1000000
|
|
||||||
frequency value:GAUGE:0:U
|
|
||||||
fscache_stat value:DERIVE:0:U
|
|
||||||
gauge value:GAUGE:U:U
|
|
||||||
hash_collisions value:DERIVE:0:U
|
|
||||||
http_request_methods value:DERIVE:0:U
|
|
||||||
http_requests value:DERIVE:0:U
|
|
||||||
http_response_codes value:DERIVE:0:U
|
|
||||||
humidity value:GAUGE:0:100
|
|
||||||
if_collisions value:DERIVE:0:U
|
|
||||||
if_dropped rx:DERIVE:0:U, tx:DERIVE:0:U
|
|
||||||
if_errors rx:DERIVE:0:U, tx:DERIVE:0:U
|
|
||||||
if_multicast value:DERIVE:0:U
|
|
||||||
if_octets rx:DERIVE:0:U, tx:DERIVE:0:U
|
|
||||||
if_packets rx:DERIVE:0:U, tx:DERIVE:0:U
|
|
||||||
if_rx_errors value:DERIVE:0:U
|
|
||||||
if_rx_octets value:DERIVE:0:U
|
|
||||||
if_tx_errors value:DERIVE:0:U
|
|
||||||
if_tx_octets value:DERIVE:0:U
|
|
||||||
invocations value:DERIVE:0:U
|
|
||||||
io_octets rx:DERIVE:0:U, tx:DERIVE:0:U
|
|
||||||
io_packets rx:DERIVE:0:U, tx:DERIVE:0:U
|
|
||||||
ipt_bytes value:DERIVE:0:U
|
|
||||||
ipt_packets value:DERIVE:0:U
|
|
||||||
irq value:DERIVE:0:U
|
|
||||||
latency value:GAUGE:0:U
|
|
||||||
links value:GAUGE:0:U
|
|
||||||
load shortterm:GAUGE:0:5000, midterm:GAUGE:0:5000, longterm:GAUGE:0:5000
|
|
||||||
md_disks value:GAUGE:0:U
|
|
||||||
memcached_command value:DERIVE:0:U
|
|
||||||
memcached_connections value:GAUGE:0:U
|
|
||||||
memcached_items value:GAUGE:0:U
|
|
||||||
memcached_octets rx:DERIVE:0:U, tx:DERIVE:0:U
|
|
||||||
memcached_ops value:DERIVE:0:U
|
|
||||||
memory value:GAUGE:0:281474976710656
|
|
||||||
multimeter value:GAUGE:U:U
|
|
||||||
mutex_operations value:DERIVE:0:U
|
|
||||||
mysql_commands value:DERIVE:0:U
|
|
||||||
mysql_handler value:DERIVE:0:U
|
|
||||||
mysql_locks value:DERIVE:0:U
|
|
||||||
mysql_log_position value:DERIVE:0:U
|
|
||||||
mysql_octets rx:DERIVE:0:U, tx:DERIVE:0:U
|
|
||||||
nfs_procedure value:DERIVE:0:U
|
|
||||||
nginx_connections value:GAUGE:0:U
|
|
||||||
nginx_requests value:DERIVE:0:U
|
|
||||||
node_octets rx:DERIVE:0:U, tx:DERIVE:0:U
|
|
||||||
node_rssi value:GAUGE:0:255
|
|
||||||
node_stat value:DERIVE:0:U
|
|
||||||
node_tx_rate value:GAUGE:0:127
|
|
||||||
objects value:GAUGE:0:U
|
|
||||||
operations value:DERIVE:0:U
|
|
||||||
percent value:GAUGE:0:100.1
|
|
||||||
percent_bytes value:GAUGE:0:100.1
|
|
||||||
percent_inodes value:GAUGE:0:100.1
|
|
||||||
pf_counters value:DERIVE:0:U
|
|
||||||
pf_limits value:DERIVE:0:U
|
|
||||||
pf_source value:DERIVE:0:U
|
|
||||||
pf_states value:GAUGE:0:U
|
|
||||||
pf_state value:DERIVE:0:U
|
|
||||||
pg_blks value:DERIVE:0:U
|
|
||||||
pg_db_size value:GAUGE:0:U
|
|
||||||
pg_n_tup_c value:DERIVE:0:U
|
|
||||||
pg_n_tup_g value:GAUGE:0:U
|
|
||||||
pg_numbackends value:GAUGE:0:U
|
|
||||||
pg_scan value:DERIVE:0:U
|
|
||||||
pg_xact value:DERIVE:0:U
|
|
||||||
ping_droprate value:GAUGE:0:100
|
|
||||||
ping_stddev value:GAUGE:0:65535
|
|
||||||
ping value:GAUGE:0:65535
|
|
||||||
players value:GAUGE:0:1000000
|
|
||||||
power value:GAUGE:0:U
|
|
||||||
protocol_counter value:DERIVE:0:U
|
|
||||||
ps_code value:GAUGE:0:9223372036854775807
|
|
||||||
ps_count processes:GAUGE:0:1000000, threads:GAUGE:0:1000000
|
|
||||||
ps_cputime user:DERIVE:0:U, syst:DERIVE:0:U
|
|
||||||
ps_data value:GAUGE:0:9223372036854775807
|
|
||||||
ps_disk_octets read:DERIVE:0:U, write:DERIVE:0:U
|
|
||||||
ps_disk_ops read:DERIVE:0:U, write:DERIVE:0:U
|
|
||||||
ps_pagefaults minflt:DERIVE:0:U, majflt:DERIVE:0:U
|
|
||||||
ps_rss value:GAUGE:0:9223372036854775807
|
|
||||||
ps_stacksize value:GAUGE:0:9223372036854775807
|
|
||||||
ps_state value:GAUGE:0:65535
|
|
||||||
ps_vm value:GAUGE:0:9223372036854775807
|
|
||||||
queue_length value:GAUGE:0:U
|
|
||||||
records value:GAUGE:0:U
|
|
||||||
requests value:GAUGE:0:U
|
|
||||||
response_time value:GAUGE:0:U
|
|
||||||
response_code value:GAUGE:0:U
|
|
||||||
route_etx value:GAUGE:0:U
|
|
||||||
route_metric value:GAUGE:0:U
|
|
||||||
routes value:GAUGE:0:U
|
|
||||||
serial_octets rx:DERIVE:0:U, tx:DERIVE:0:U
|
|
||||||
signal_noise value:GAUGE:U:0
|
|
||||||
signal_power value:GAUGE:U:0
|
|
||||||
signal_quality value:GAUGE:0:U
|
|
||||||
snr value:GAUGE:0:U
|
|
||||||
spam_check value:GAUGE:0:U
|
|
||||||
spam_score value:GAUGE:U:U
|
|
||||||
spl value:GAUGE:U:U
|
|
||||||
swap_io value:DERIVE:0:U
|
|
||||||
swap value:GAUGE:0:1099511627776
|
|
||||||
tcp_connections value:GAUGE:0:4294967295
|
|
||||||
temperature value:GAUGE:U:U
|
|
||||||
threads value:GAUGE:0:U
|
|
||||||
time_dispersion value:GAUGE:-1000000:1000000
|
|
||||||
timeleft value:GAUGE:0:U
|
|
||||||
time_offset value:GAUGE:-1000000:1000000
|
|
||||||
total_bytes value:DERIVE:0:U
|
|
||||||
total_connections value:DERIVE:0:U
|
|
||||||
total_objects value:DERIVE:0:U
|
|
||||||
total_operations value:DERIVE:0:U
|
|
||||||
total_requests value:DERIVE:0:U
|
|
||||||
total_sessions value:DERIVE:0:U
|
|
||||||
total_threads value:DERIVE:0:U
|
|
||||||
total_time_in_ms value:DERIVE:0:U
|
|
||||||
total_values value:DERIVE:0:U
|
|
||||||
uptime value:GAUGE:0:4294967295
|
|
||||||
users value:GAUGE:0:65535
|
|
||||||
vcl value:GAUGE:0:65535
|
|
||||||
vcpu value:GAUGE:0:U
|
|
||||||
virt_cpu_total value:DERIVE:0:U
|
|
||||||
virt_vcpu value:DERIVE:0:U
|
|
||||||
vmpage_action value:DERIVE:0:U
|
|
||||||
vmpage_faults minflt:DERIVE:0:U, majflt:DERIVE:0:U
|
|
||||||
vmpage_io in:DERIVE:0:U, out:DERIVE:0:U
|
|
||||||
vmpage_number value:GAUGE:0:4294967295
|
|
||||||
volatile_changes value:GAUGE:0:U
|
|
||||||
voltage_threshold value:GAUGE:U:U, threshold:GAUGE:U:U
|
|
||||||
voltage value:GAUGE:U:U
|
|
||||||
vs_memory value:GAUGE:0:9223372036854775807
|
|
||||||
vs_processes value:GAUGE:0:65535
|
|
||||||
vs_threads value:GAUGE:0:65535
|
|
||||||
|
|
||||||
#
|
|
||||||
# Legacy types
|
|
||||||
# (required for the v5 upgrade target)
|
|
||||||
#
|
|
||||||
arc_counts demand_data:COUNTER:0:U, demand_metadata:COUNTER:0:U, prefetch_data:COUNTER:0:U, prefetch_metadata:COUNTER:0:U
|
|
||||||
arc_l2_bytes read:COUNTER:0:U, write:COUNTER:0:U
|
|
||||||
arc_l2_size value:GAUGE:0:U
|
|
||||||
arc_ratio value:GAUGE:0:U
|
|
||||||
arc_size current:GAUGE:0:U, target:GAUGE:0:U, minlimit:GAUGE:0:U, maxlimit:GAUGE:0:U
|
|
||||||
mysql_qcache hits:COUNTER:0:U, inserts:COUNTER:0:U, not_cached:COUNTER:0:U, lowmem_prunes:COUNTER:0:U, queries_in_cache:GAUGE:0:U
|
|
||||||
mysql_threads running:GAUGE:0:U, connected:GAUGE:0:U, cached:GAUGE:0:U, created:COUNTER:0:U
|
|
44
Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/config.go
generated
vendored
44
Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/config.go
generated
vendored
|
@ -1,44 +0,0 @@
|
||||||
package collectd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/toml"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
DefaultBindAddress = ":25826"
|
|
||||||
|
|
||||||
DefaultDatabase = "collectd"
|
|
||||||
|
|
||||||
DefaultRetentionPolicy = ""
|
|
||||||
|
|
||||||
DefaultBatchSize = 5000
|
|
||||||
|
|
||||||
DefaultBatchDuration = toml.Duration(10 * time.Second)
|
|
||||||
|
|
||||||
DefaultTypesDB = "/usr/share/collectd/types.db"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Config represents a configuration for the collectd service.
|
|
||||||
type Config struct {
|
|
||||||
Enabled bool `toml:"enabled"`
|
|
||||||
BindAddress string `toml:"bind-address"`
|
|
||||||
Database string `toml:"database"`
|
|
||||||
RetentionPolicy string `toml:"retention-policy"`
|
|
||||||
BatchSize int `toml:"batch-size"`
|
|
||||||
BatchDuration toml.Duration `toml:"batch-timeout"`
|
|
||||||
TypesDB string `toml:"typesdb"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewConfig returns a new instance of Config with defaults.
|
|
||||||
func NewConfig() Config {
|
|
||||||
return Config{
|
|
||||||
BindAddress: DefaultBindAddress,
|
|
||||||
Database: DefaultDatabase,
|
|
||||||
RetentionPolicy: DefaultRetentionPolicy,
|
|
||||||
BatchSize: DefaultBatchSize,
|
|
||||||
BatchDuration: DefaultBatchDuration,
|
|
||||||
TypesDB: DefaultTypesDB,
|
|
||||||
}
|
|
||||||
}
|
|
32
Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/config_test.go
generated
vendored
32
Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/config_test.go
generated
vendored
|
@ -1,32 +0,0 @@
|
||||||
package collectd_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/BurntSushi/toml"
|
|
||||||
"github.com/influxdb/influxdb/services/collectd"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestConfig_Parse(t *testing.T) {
|
|
||||||
// Parse configuration.
|
|
||||||
var c collectd.Config
|
|
||||||
if _, err := toml.Decode(`
|
|
||||||
enabled = true
|
|
||||||
bind-address = ":9000"
|
|
||||||
database = "xxx"
|
|
||||||
typesdb = "yyy"
|
|
||||||
`, &c); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate configuration.
|
|
||||||
if c.Enabled != true {
|
|
||||||
t.Fatalf("unexpected enabled: %v", c.Enabled)
|
|
||||||
} else if c.BindAddress != ":9000" {
|
|
||||||
t.Fatalf("unexpected bind address: %s", c.BindAddress)
|
|
||||||
} else if c.Database != "xxx" {
|
|
||||||
t.Fatalf("unexpected database: %s", c.Database)
|
|
||||||
} else if c.TypesDB != "yyy" {
|
|
||||||
t.Fatalf("unexpected types db: %s", c.TypesDB)
|
|
||||||
}
|
|
||||||
}
|
|
278
Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/service.go
generated
vendored
278
Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/service.go
generated
vendored
|
@ -1,278 +0,0 @@
|
||||||
package collectd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"net"
|
|
||||||
"os"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/cluster"
|
|
||||||
"github.com/influxdb/influxdb/meta"
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
"github.com/kimor79/gollectd"
|
|
||||||
)
|
|
||||||
|
|
||||||
const leaderWaitTimeout = 30 * time.Second
|
|
||||||
|
|
||||||
// pointsWriter is an internal interface to make testing easier.
|
|
||||||
type pointsWriter interface {
|
|
||||||
WritePoints(p *cluster.WritePointsRequest) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// metaStore is an internal interface to make testing easier.
|
|
||||||
type metaStore interface {
|
|
||||||
WaitForLeader(d time.Duration) error
|
|
||||||
CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Service represents a UDP server which receives metrics in collectd's binary
|
|
||||||
// protocol and stores them in InfluxDB.
|
|
||||||
type Service struct {
|
|
||||||
Config *Config
|
|
||||||
MetaStore metaStore
|
|
||||||
PointsWriter pointsWriter
|
|
||||||
Logger *log.Logger
|
|
||||||
|
|
||||||
wg sync.WaitGroup
|
|
||||||
err chan error
|
|
||||||
stop chan struct{}
|
|
||||||
ln *net.UDPConn
|
|
||||||
batcher *tsdb.PointBatcher
|
|
||||||
typesdb gollectd.Types
|
|
||||||
addr net.Addr
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewService returns a new instance of the collectd service.
|
|
||||||
func NewService(c Config) *Service {
|
|
||||||
s := &Service{
|
|
||||||
Config: &c,
|
|
||||||
Logger: log.New(os.Stderr, "[collectd] ", log.LstdFlags),
|
|
||||||
err: make(chan error),
|
|
||||||
}
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open starts the service.
|
|
||||||
func (s *Service) Open() error {
|
|
||||||
s.Logger.Printf("Starting collectd service")
|
|
||||||
|
|
||||||
if s.Config.BindAddress == "" {
|
|
||||||
return fmt.Errorf("bind address is blank")
|
|
||||||
} else if s.Config.Database == "" {
|
|
||||||
return fmt.Errorf("database name is blank")
|
|
||||||
} else if s.PointsWriter == nil {
|
|
||||||
return fmt.Errorf("PointsWriter is nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := s.MetaStore.WaitForLeader(leaderWaitTimeout); err != nil {
|
|
||||||
s.Logger.Printf("Failed to detect a cluster leader: %s", err.Error())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := s.MetaStore.CreateDatabaseIfNotExists(s.Config.Database); err != nil {
|
|
||||||
s.Logger.Printf("Failed to ensure target database %s exists: %s", s.Config.Database, err.Error())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if s.typesdb == nil {
|
|
||||||
// Open collectd types.
|
|
||||||
typesdb, err := gollectd.TypesDBFile(s.Config.TypesDB)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Open(): %s", err)
|
|
||||||
}
|
|
||||||
s.typesdb = typesdb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resolve our address.
|
|
||||||
addr, err := net.ResolveUDPAddr("udp", s.Config.BindAddress)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("unable to resolve UDP address: %s", err)
|
|
||||||
}
|
|
||||||
s.addr = addr
|
|
||||||
|
|
||||||
// Start listening
|
|
||||||
ln, err := net.ListenUDP("udp", addr)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("unable to listen on UDP: %s", err)
|
|
||||||
}
|
|
||||||
s.ln = ln
|
|
||||||
|
|
||||||
s.Logger.Println("Listening on UDP: ", ln.LocalAddr().String())
|
|
||||||
|
|
||||||
// Start the points batcher.
|
|
||||||
s.batcher = tsdb.NewPointBatcher(s.Config.BatchSize, time.Duration(s.Config.BatchDuration))
|
|
||||||
s.batcher.Start()
|
|
||||||
|
|
||||||
// Create channel and wait group for signalling goroutines to stop.
|
|
||||||
s.stop = make(chan struct{})
|
|
||||||
s.wg.Add(2)
|
|
||||||
|
|
||||||
// Start goroutines that process collectd packets.
|
|
||||||
go s.serve()
|
|
||||||
go s.writePoints()
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close stops the service.
|
|
||||||
func (s *Service) Close() error {
|
|
||||||
// Close the connection, and wait for the goroutine to exit.
|
|
||||||
if s.stop != nil {
|
|
||||||
close(s.stop)
|
|
||||||
}
|
|
||||||
if s.ln != nil {
|
|
||||||
s.ln.Close()
|
|
||||||
}
|
|
||||||
if s.batcher != nil {
|
|
||||||
s.batcher.Stop()
|
|
||||||
}
|
|
||||||
s.wg.Wait()
|
|
||||||
|
|
||||||
// Release all remaining resources.
|
|
||||||
s.stop = nil
|
|
||||||
s.ln = nil
|
|
||||||
s.batcher = nil
|
|
||||||
s.Logger.Println("collectd UDP closed")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetLogger sets the internal logger to the logger passed in.
|
|
||||||
func (s *Service) SetLogger(l *log.Logger) {
|
|
||||||
s.Logger = l
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetTypes sets collectd types db.
|
|
||||||
func (s *Service) SetTypes(types string) (err error) {
|
|
||||||
s.typesdb, err = gollectd.TypesDB([]byte(types))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Err returns a channel for fatal errors that occur on go routines.
|
|
||||||
func (s *Service) Err() chan error { return s.err }
|
|
||||||
|
|
||||||
// Addr returns the listener's address. Returns nil if listener is closed.
|
|
||||||
func (s *Service) Addr() net.Addr {
|
|
||||||
return s.ln.LocalAddr()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Service) serve() {
|
|
||||||
defer s.wg.Done()
|
|
||||||
|
|
||||||
// From https://collectd.org/wiki/index.php/Binary_protocol
|
|
||||||
// 1024 bytes (payload only, not including UDP / IP headers)
|
|
||||||
// In versions 4.0 through 4.7, the receive buffer has a fixed size
|
|
||||||
// of 1024 bytes. When longer packets are received, the trailing data
|
|
||||||
// is simply ignored. Since version 4.8, the buffer size can be
|
|
||||||
// configured. Version 5.0 will increase the default buffer size to
|
|
||||||
// 1452 bytes (the maximum payload size when using UDP/IPv6 over
|
|
||||||
// Ethernet).
|
|
||||||
buffer := make([]byte, 1452)
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-s.stop:
|
|
||||||
// We closed the connection, time to go.
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
// Keep processing.
|
|
||||||
}
|
|
||||||
|
|
||||||
n, _, err := s.ln.ReadFromUDP(buffer)
|
|
||||||
if err != nil {
|
|
||||||
s.Logger.Printf("collectd ReadFromUDP error: %s", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if n > 0 {
|
|
||||||
s.handleMessage(buffer[:n])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Service) handleMessage(buffer []byte) {
|
|
||||||
packets, err := gollectd.Packets(buffer, s.typesdb)
|
|
||||||
if err != nil {
|
|
||||||
s.Logger.Printf("Collectd parse error: %s", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for _, packet := range *packets {
|
|
||||||
points := Unmarshal(&packet)
|
|
||||||
for _, p := range points {
|
|
||||||
s.batcher.In() <- p
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Service) writePoints() {
|
|
||||||
defer s.wg.Done()
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-s.stop:
|
|
||||||
return
|
|
||||||
case batch := <-s.batcher.Out():
|
|
||||||
req := &cluster.WritePointsRequest{
|
|
||||||
Database: s.Config.Database,
|
|
||||||
RetentionPolicy: s.Config.RetentionPolicy,
|
|
||||||
ConsistencyLevel: cluster.ConsistencyLevelAny,
|
|
||||||
Points: batch,
|
|
||||||
}
|
|
||||||
if err := s.PointsWriter.WritePoints(req); err != nil {
|
|
||||||
s.Logger.Printf("failed to write batch: %s", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshal translates a collectd packet into InfluxDB data points.
|
|
||||||
func Unmarshal(packet *gollectd.Packet) []tsdb.Point {
|
|
||||||
// Prefer high resolution timestamp.
|
|
||||||
var timestamp time.Time
|
|
||||||
if packet.TimeHR > 0 {
|
|
||||||
// TimeHR is "near" nanosecond measurement, but not exactly nanasecond time
|
|
||||||
// Since we store time in microseconds, we round here (mostly so tests will work easier)
|
|
||||||
sec := packet.TimeHR >> 30
|
|
||||||
// Shifting, masking, and dividing by 1 billion to get nanoseconds.
|
|
||||||
nsec := ((packet.TimeHR & 0x3FFFFFFF) << 30) / 1000 / 1000 / 1000
|
|
||||||
timestamp = time.Unix(int64(sec), int64(nsec)).UTC().Round(time.Microsecond)
|
|
||||||
} else {
|
|
||||||
// If we don't have high resolution time, fall back to basic unix time
|
|
||||||
timestamp = time.Unix(int64(packet.Time), 0).UTC()
|
|
||||||
}
|
|
||||||
|
|
||||||
var points []tsdb.Point
|
|
||||||
for i := range packet.Values {
|
|
||||||
name := fmt.Sprintf("%s_%s", packet.Plugin, packet.Values[i].Name)
|
|
||||||
tags := make(map[string]string)
|
|
||||||
fields := make(map[string]interface{})
|
|
||||||
|
|
||||||
fields["value"] = packet.Values[i].Value
|
|
||||||
|
|
||||||
if packet.Hostname != "" {
|
|
||||||
tags["host"] = packet.Hostname
|
|
||||||
}
|
|
||||||
if packet.PluginInstance != "" {
|
|
||||||
tags["instance"] = packet.PluginInstance
|
|
||||||
}
|
|
||||||
if packet.Type != "" {
|
|
||||||
tags["type"] = packet.Type
|
|
||||||
}
|
|
||||||
if packet.TypeInstance != "" {
|
|
||||||
tags["type_instance"] = packet.TypeInstance
|
|
||||||
}
|
|
||||||
p := tsdb.NewPoint(name, tags, fields, timestamp)
|
|
||||||
|
|
||||||
points = append(points, p)
|
|
||||||
}
|
|
||||||
return points
|
|
||||||
}
|
|
||||||
|
|
||||||
// assert will panic with a given formatted message if the given condition is false.
|
|
||||||
func assert(condition bool, msg string, v ...interface{}) {
|
|
||||||
if !condition {
|
|
||||||
panic(fmt.Sprintf("assert failed: "+msg, v...))
|
|
||||||
}
|
|
||||||
}
|
|
501
Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/service_test.go
generated
vendored
501
Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/service_test.go
generated
vendored
|
@ -1,501 +0,0 @@
|
||||||
package collectd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/hex"
|
|
||||||
"errors"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"net"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/cluster"
|
|
||||||
"github.com/influxdb/influxdb/meta"
|
|
||||||
"github.com/influxdb/influxdb/toml"
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Test that the service checks / creates the target database on startup.
|
|
||||||
func TestService_CreatesDatabase(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
s := newTestService(1, time.Second)
|
|
||||||
|
|
||||||
createDatabaseCalled := false
|
|
||||||
|
|
||||||
ms := &testMetaStore{}
|
|
||||||
ms.CreateDatabaseIfNotExistsFn = func(name string) (*meta.DatabaseInfo, error) {
|
|
||||||
if name != s.Config.Database {
|
|
||||||
t.Errorf("\n\texp = %s\n\tgot = %s\n", s.Config.Database, name)
|
|
||||||
}
|
|
||||||
createDatabaseCalled = true
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
s.Service.MetaStore = ms
|
|
||||||
|
|
||||||
s.Open()
|
|
||||||
s.Close()
|
|
||||||
|
|
||||||
if !createDatabaseCalled {
|
|
||||||
t.Errorf("CreateDatabaseIfNotExists should have been called when the service opened.")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that the collectd service correctly batches points by BatchSize.
|
|
||||||
func TestService_BatchSize(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
totalPoints := len(expPoints)
|
|
||||||
|
|
||||||
// Batch sizes that totalTestPoints divide evenly by.
|
|
||||||
batchSizes := []int{1, 2, 13}
|
|
||||||
|
|
||||||
for _, batchSize := range batchSizes {
|
|
||||||
func() {
|
|
||||||
s := newTestService(batchSize, time.Second)
|
|
||||||
|
|
||||||
pointCh := make(chan tsdb.Point)
|
|
||||||
s.MetaStore.CreateDatabaseIfNotExistsFn = func(name string) (*meta.DatabaseInfo, error) { return nil, nil }
|
|
||||||
s.PointsWriter.WritePointsFn = func(req *cluster.WritePointsRequest) error {
|
|
||||||
if len(req.Points) != batchSize {
|
|
||||||
t.Errorf("\n\texp = %d\n\tgot = %d\n", batchSize, len(req.Points))
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, p := range req.Points {
|
|
||||||
pointCh <- p
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := s.Open(); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer func() { t.Log("closing service"); s.Close() }()
|
|
||||||
|
|
||||||
// Get the address & port the service is listening on for collectd data.
|
|
||||||
addr := s.Addr()
|
|
||||||
conn, err := net.Dial("udp", addr.String())
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Send the test data to the service.
|
|
||||||
if n, err := conn.Write(testData); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
} else if n != len(testData) {
|
|
||||||
t.Fatalf("only sent %d of %d bytes", n, len(testData))
|
|
||||||
}
|
|
||||||
|
|
||||||
points := []tsdb.Point{}
|
|
||||||
Loop:
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case p := <-pointCh:
|
|
||||||
points = append(points, p)
|
|
||||||
if len(points) == totalPoints {
|
|
||||||
break Loop
|
|
||||||
}
|
|
||||||
case <-time.After(time.Second):
|
|
||||||
t.Logf("exp %d points, got %d", totalPoints, len(points))
|
|
||||||
t.Fatal("timed out waiting for points from collectd service")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(points) != totalPoints {
|
|
||||||
t.Fatalf("exp %d points, got %d", totalPoints, len(points))
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, exp := range expPoints {
|
|
||||||
got := points[i].String()
|
|
||||||
if got != exp {
|
|
||||||
t.Fatalf("\n\texp = %s\n\tgot = %s\n", exp, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test that the collectd service correctly batches points using BatchDuration.
|
|
||||||
func TestService_BatchDuration(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
totalPoints := len(expPoints)
|
|
||||||
|
|
||||||
s := newTestService(5000, 250*time.Millisecond)
|
|
||||||
|
|
||||||
pointCh := make(chan tsdb.Point, 1000)
|
|
||||||
s.MetaStore.CreateDatabaseIfNotExistsFn = func(name string) (*meta.DatabaseInfo, error) { return nil, nil }
|
|
||||||
s.PointsWriter.WritePointsFn = func(req *cluster.WritePointsRequest) error {
|
|
||||||
for _, p := range req.Points {
|
|
||||||
pointCh <- p
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := s.Open(); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer func() { t.Log("closing service"); s.Close() }()
|
|
||||||
|
|
||||||
// Get the address & port the service is listening on for collectd data.
|
|
||||||
addr := s.Addr()
|
|
||||||
conn, err := net.Dial("udp", addr.String())
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Send the test data to the service.
|
|
||||||
if n, err := conn.Write(testData); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
} else if n != len(testData) {
|
|
||||||
t.Fatalf("only sent %d of %d bytes", n, len(testData))
|
|
||||||
}
|
|
||||||
|
|
||||||
points := []tsdb.Point{}
|
|
||||||
Loop:
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case p := <-pointCh:
|
|
||||||
points = append(points, p)
|
|
||||||
if len(points) == totalPoints {
|
|
||||||
break Loop
|
|
||||||
}
|
|
||||||
case <-time.After(time.Second):
|
|
||||||
t.Logf("exp %d points, got %d", totalPoints, len(points))
|
|
||||||
t.Fatal("timed out waiting for points from collectd service")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(points) != totalPoints {
|
|
||||||
t.Fatalf("exp %d points, got %d", totalPoints, len(points))
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, exp := range expPoints {
|
|
||||||
got := points[i].String()
|
|
||||||
if got != exp {
|
|
||||||
t.Fatalf("\n\texp = %s\n\tgot = %s\n", exp, got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type testService struct {
|
|
||||||
*Service
|
|
||||||
MetaStore testMetaStore
|
|
||||||
PointsWriter testPointsWriter
|
|
||||||
}
|
|
||||||
|
|
||||||
func newTestService(batchSize int, batchDuration time.Duration) *testService {
|
|
||||||
s := &testService{
|
|
||||||
Service: NewService(Config{
|
|
||||||
BindAddress: "127.0.0.1:0",
|
|
||||||
Database: "collectd_test",
|
|
||||||
BatchSize: batchSize,
|
|
||||||
BatchDuration: toml.Duration(batchDuration),
|
|
||||||
}),
|
|
||||||
}
|
|
||||||
s.Service.PointsWriter = &s.PointsWriter
|
|
||||||
s.Service.MetaStore = &s.MetaStore
|
|
||||||
|
|
||||||
// Set the collectd types using test string.
|
|
||||||
if err := s.SetTypes(typesDBText); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !testing.Verbose() {
|
|
||||||
s.Logger = log.New(ioutil.Discard, "", log.LstdFlags)
|
|
||||||
}
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
type testPointsWriter struct {
|
|
||||||
WritePointsFn func(*cluster.WritePointsRequest) error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *testPointsWriter) WritePoints(p *cluster.WritePointsRequest) error {
|
|
||||||
return w.WritePointsFn(p)
|
|
||||||
}
|
|
||||||
|
|
||||||
type testMetaStore struct {
|
|
||||||
CreateDatabaseIfNotExistsFn func(name string) (*meta.DatabaseInfo, error)
|
|
||||||
//DatabaseFn func(name string) (*meta.DatabaseInfo, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *testMetaStore) CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error) {
|
|
||||||
return ms.CreateDatabaseIfNotExistsFn(name)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *testMetaStore) WaitForLeader(d time.Duration) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func wait(c chan struct{}, d time.Duration) (err error) {
|
|
||||||
select {
|
|
||||||
case <-c:
|
|
||||||
case <-time.After(d):
|
|
||||||
err = errors.New("timed out")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func waitInt(c chan int, d time.Duration) (i int, err error) {
|
|
||||||
select {
|
|
||||||
case i = <-c:
|
|
||||||
case <-time.After(d):
|
|
||||||
err = errors.New("timed out")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func check(err error) {
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Raw data sent by collectd, captured using Wireshark.
|
|
||||||
var testData = func() []byte {
|
|
||||||
b, err := hex.DecodeString("000000167066312d36322d3231302d39342d313733000001000c00000000544928ff0007000c00000000000000050002000c656e74726f7079000004000c656e74726f7079000006000f0001010000000000007240000200086370750000030006310000040008637075000005000969646c65000006000f0001000000000000a674620005000977616974000006000f0001000000000000000000000200076466000003000500000400076466000005000d6c6976652d636f7700000600180002010100000000a090b641000000a0cb6a2742000200086370750000030006310000040008637075000005000e696e74657272757074000006000f00010000000000000000fe0005000c736f6674697271000006000f000100000000000000000000020007646600000300050000040007646600000500096c6976650000060018000201010000000000000000000000e0ec972742000200086370750000030006310000040008637075000005000a737465616c000006000f00010000000000000000000003000632000005000975736572000006000f0001000000000000005f36000500096e696365000006000f0001000000000000000ad80002000e696e746572666163650000030005000004000e69665f6f6374657473000005000b64756d6d79300000060018000200000000000000000000000000000000041a000200076466000004000764660000050008746d70000006001800020101000000000000f240000000a0ea972742000200086370750000030006320000040008637075000005000b73797374656d000006000f00010000000000000045d30002000e696e746572666163650000030005000004000f69665f7061636b657473000005000b64756d6d79300000060018000200000000000000000000000000000000000f000200086370750000030006320000040008637075000005000969646c65000006000f0001000000000000a66480000200076466000003000500000400076466000005000d72756e2d6c6f636b000006001800020101000000000000000000000000000054410002000e696e74657266616365000004000e69665f6572726f7273000005000b64756d6d793000000600180002000000000000000000000000000000000000000200086370750000030006320000040008637075000005000977616974000006000f00010000000000000000000005000e696e74657272757074000006000f0001000000000000000132")
|
|
||||||
check(err)
|
|
||||||
return b
|
|
||||||
}()
|
|
||||||
|
|
||||||
var expPoints = []string{
|
|
||||||
"entropy_value,host=pf1-62-210-94-173,type=entropy value=288.0 1414080767000000000",
|
|
||||||
"cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=idle value=10908770.0 1414080767000000000",
|
|
||||||
"cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=wait value=0.0 1414080767000000000",
|
|
||||||
"df_used,host=pf1-62-210-94-173,type=df,type_instance=live-cow value=378576896.0 1414080767000000000",
|
|
||||||
"df_free,host=pf1-62-210-94-173,type=df,type_instance=live-cow value=50287988736.0 1414080767000000000",
|
|
||||||
"cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=interrupt value=254.0 1414080767000000000",
|
|
||||||
"cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=softirq value=0.0 1414080767000000000",
|
|
||||||
"df_used,host=pf1-62-210-94-173,type=df,type_instance=live value=0.0 1414080767000000000",
|
|
||||||
"df_free,host=pf1-62-210-94-173,type=df,type_instance=live value=50666565632.0 1414080767000000000",
|
|
||||||
"cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=steal value=0.0 1414080767000000000",
|
|
||||||
"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=user value=24374.0 1414080767000000000",
|
|
||||||
"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=nice value=2776.0 1414080767000000000",
|
|
||||||
"interface_rx,host=pf1-62-210-94-173,type=if_octets,type_instance=dummy0 value=0.0 1414080767000000000",
|
|
||||||
"interface_tx,host=pf1-62-210-94-173,type=if_octets,type_instance=dummy0 value=1050.0 1414080767000000000",
|
|
||||||
"df_used,host=pf1-62-210-94-173,type=df,type_instance=tmp value=73728.0 1414080767000000000",
|
|
||||||
"df_free,host=pf1-62-210-94-173,type=df,type_instance=tmp value=50666491904.0 1414080767000000000",
|
|
||||||
"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=system value=17875.0 1414080767000000000",
|
|
||||||
"interface_rx,host=pf1-62-210-94-173,type=if_packets,type_instance=dummy0 value=0.0 1414080767000000000",
|
|
||||||
"interface_tx,host=pf1-62-210-94-173,type=if_packets,type_instance=dummy0 value=15.0 1414080767000000000",
|
|
||||||
"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=idle value=10904704.0 1414080767000000000",
|
|
||||||
"df_used,host=pf1-62-210-94-173,type=df,type_instance=run-lock value=0.0 1414080767000000000",
|
|
||||||
"df_free,host=pf1-62-210-94-173,type=df,type_instance=run-lock value=5242880.0 1414080767000000000",
|
|
||||||
"interface_rx,host=pf1-62-210-94-173,type=if_errors,type_instance=dummy0 value=0.0 1414080767000000000",
|
|
||||||
"interface_tx,host=pf1-62-210-94-173,type=if_errors,type_instance=dummy0 value=0.0 1414080767000000000",
|
|
||||||
"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=wait value=0.0 1414080767000000000",
|
|
||||||
"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=interrupt value=306.0 1414080767000000000",
|
|
||||||
}
|
|
||||||
|
|
||||||
// Taken from /usr/share/collectd/types.db on a Ubuntu system
|
|
||||||
var typesDBText = `
|
|
||||||
absolute value:ABSOLUTE:0:U
|
|
||||||
apache_bytes value:DERIVE:0:U
|
|
||||||
apache_connections value:GAUGE:0:65535
|
|
||||||
apache_idle_workers value:GAUGE:0:65535
|
|
||||||
apache_requests value:DERIVE:0:U
|
|
||||||
apache_scoreboard value:GAUGE:0:65535
|
|
||||||
ath_nodes value:GAUGE:0:65535
|
|
||||||
ath_stat value:DERIVE:0:U
|
|
||||||
backends value:GAUGE:0:65535
|
|
||||||
bitrate value:GAUGE:0:4294967295
|
|
||||||
bytes value:GAUGE:0:U
|
|
||||||
cache_eviction value:DERIVE:0:U
|
|
||||||
cache_operation value:DERIVE:0:U
|
|
||||||
cache_ratio value:GAUGE:0:100
|
|
||||||
cache_result value:DERIVE:0:U
|
|
||||||
cache_size value:GAUGE:0:4294967295
|
|
||||||
charge value:GAUGE:0:U
|
|
||||||
compression_ratio value:GAUGE:0:2
|
|
||||||
compression uncompressed:DERIVE:0:U, compressed:DERIVE:0:U
|
|
||||||
connections value:DERIVE:0:U
|
|
||||||
conntrack value:GAUGE:0:4294967295
|
|
||||||
contextswitch value:DERIVE:0:U
|
|
||||||
counter value:COUNTER:U:U
|
|
||||||
cpufreq value:GAUGE:0:U
|
|
||||||
cpu value:DERIVE:0:U
|
|
||||||
current_connections value:GAUGE:0:U
|
|
||||||
current_sessions value:GAUGE:0:U
|
|
||||||
current value:GAUGE:U:U
|
|
||||||
delay value:GAUGE:-1000000:1000000
|
|
||||||
derive value:DERIVE:0:U
|
|
||||||
df_complex value:GAUGE:0:U
|
|
||||||
df_inodes value:GAUGE:0:U
|
|
||||||
df used:GAUGE:0:1125899906842623, free:GAUGE:0:1125899906842623
|
|
||||||
disk_latency read:GAUGE:0:U, write:GAUGE:0:U
|
|
||||||
disk_merged read:DERIVE:0:U, write:DERIVE:0:U
|
|
||||||
disk_octets read:DERIVE:0:U, write:DERIVE:0:U
|
|
||||||
disk_ops_complex value:DERIVE:0:U
|
|
||||||
disk_ops read:DERIVE:0:U, write:DERIVE:0:U
|
|
||||||
disk_time read:DERIVE:0:U, write:DERIVE:0:U
|
|
||||||
dns_answer value:DERIVE:0:U
|
|
||||||
dns_notify value:DERIVE:0:U
|
|
||||||
dns_octets queries:DERIVE:0:U, responses:DERIVE:0:U
|
|
||||||
dns_opcode value:DERIVE:0:U
|
|
||||||
dns_qtype_cached value:GAUGE:0:4294967295
|
|
||||||
dns_qtype value:DERIVE:0:U
|
|
||||||
dns_query value:DERIVE:0:U
|
|
||||||
dns_question value:DERIVE:0:U
|
|
||||||
dns_rcode value:DERIVE:0:U
|
|
||||||
dns_reject value:DERIVE:0:U
|
|
||||||
dns_request value:DERIVE:0:U
|
|
||||||
dns_resolver value:DERIVE:0:U
|
|
||||||
dns_response value:DERIVE:0:U
|
|
||||||
dns_transfer value:DERIVE:0:U
|
|
||||||
dns_update value:DERIVE:0:U
|
|
||||||
dns_zops value:DERIVE:0:U
|
|
||||||
duration seconds:GAUGE:0:U
|
|
||||||
email_check value:GAUGE:0:U
|
|
||||||
email_count value:GAUGE:0:U
|
|
||||||
email_size value:GAUGE:0:U
|
|
||||||
entropy value:GAUGE:0:4294967295
|
|
||||||
fanspeed value:GAUGE:0:U
|
|
||||||
file_size value:GAUGE:0:U
|
|
||||||
files value:GAUGE:0:U
|
|
||||||
fork_rate value:DERIVE:0:U
|
|
||||||
frequency_offset value:GAUGE:-1000000:1000000
|
|
||||||
frequency value:GAUGE:0:U
|
|
||||||
fscache_stat value:DERIVE:0:U
|
|
||||||
gauge value:GAUGE:U:U
|
|
||||||
hash_collisions value:DERIVE:0:U
|
|
||||||
http_request_methods value:DERIVE:0:U
|
|
||||||
http_requests value:DERIVE:0:U
|
|
||||||
http_response_codes value:DERIVE:0:U
|
|
||||||
humidity value:GAUGE:0:100
|
|
||||||
if_collisions value:DERIVE:0:U
|
|
||||||
if_dropped rx:DERIVE:0:U, tx:DERIVE:0:U
|
|
||||||
if_errors rx:DERIVE:0:U, tx:DERIVE:0:U
|
|
||||||
if_multicast value:DERIVE:0:U
|
|
||||||
if_octets rx:DERIVE:0:U, tx:DERIVE:0:U
|
|
||||||
if_packets rx:DERIVE:0:U, tx:DERIVE:0:U
|
|
||||||
if_rx_errors value:DERIVE:0:U
|
|
||||||
if_rx_octets value:DERIVE:0:U
|
|
||||||
if_tx_errors value:DERIVE:0:U
|
|
||||||
if_tx_octets value:DERIVE:0:U
|
|
||||||
invocations value:DERIVE:0:U
|
|
||||||
io_octets rx:DERIVE:0:U, tx:DERIVE:0:U
|
|
||||||
io_packets rx:DERIVE:0:U, tx:DERIVE:0:U
|
|
||||||
ipt_bytes value:DERIVE:0:U
|
|
||||||
ipt_packets value:DERIVE:0:U
|
|
||||||
irq value:DERIVE:0:U
|
|
||||||
latency value:GAUGE:0:U
|
|
||||||
links value:GAUGE:0:U
|
|
||||||
load shortterm:GAUGE:0:5000, midterm:GAUGE:0:5000, longterm:GAUGE:0:5000
|
|
||||||
md_disks value:GAUGE:0:U
|
|
||||||
memcached_command value:DERIVE:0:U
|
|
||||||
memcached_connections value:GAUGE:0:U
|
|
||||||
memcached_items value:GAUGE:0:U
|
|
||||||
memcached_octets rx:DERIVE:0:U, tx:DERIVE:0:U
|
|
||||||
memcached_ops value:DERIVE:0:U
|
|
||||||
memory value:GAUGE:0:281474976710656
|
|
||||||
multimeter value:GAUGE:U:U
|
|
||||||
mutex_operations value:DERIVE:0:U
|
|
||||||
mysql_commands value:DERIVE:0:U
|
|
||||||
mysql_handler value:DERIVE:0:U
|
|
||||||
mysql_locks value:DERIVE:0:U
|
|
||||||
mysql_log_position value:DERIVE:0:U
|
|
||||||
mysql_octets rx:DERIVE:0:U, tx:DERIVE:0:U
|
|
||||||
nfs_procedure value:DERIVE:0:U
|
|
||||||
nginx_connections value:GAUGE:0:U
|
|
||||||
nginx_requests value:DERIVE:0:U
|
|
||||||
node_octets rx:DERIVE:0:U, tx:DERIVE:0:U
|
|
||||||
node_rssi value:GAUGE:0:255
|
|
||||||
node_stat value:DERIVE:0:U
|
|
||||||
node_tx_rate value:GAUGE:0:127
|
|
||||||
objects value:GAUGE:0:U
|
|
||||||
operations value:DERIVE:0:U
|
|
||||||
percent value:GAUGE:0:100.1
|
|
||||||
percent_bytes value:GAUGE:0:100.1
|
|
||||||
percent_inodes value:GAUGE:0:100.1
|
|
||||||
pf_counters value:DERIVE:0:U
|
|
||||||
pf_limits value:DERIVE:0:U
|
|
||||||
pf_source value:DERIVE:0:U
|
|
||||||
pf_states value:GAUGE:0:U
|
|
||||||
pf_state value:DERIVE:0:U
|
|
||||||
pg_blks value:DERIVE:0:U
|
|
||||||
pg_db_size value:GAUGE:0:U
|
|
||||||
pg_n_tup_c value:DERIVE:0:U
|
|
||||||
pg_n_tup_g value:GAUGE:0:U
|
|
||||||
pg_numbackends value:GAUGE:0:U
|
|
||||||
pg_scan value:DERIVE:0:U
|
|
||||||
pg_xact value:DERIVE:0:U
|
|
||||||
ping_droprate value:GAUGE:0:100
|
|
||||||
ping_stddev value:GAUGE:0:65535
|
|
||||||
ping value:GAUGE:0:65535
|
|
||||||
players value:GAUGE:0:1000000
|
|
||||||
power value:GAUGE:0:U
|
|
||||||
protocol_counter value:DERIVE:0:U
|
|
||||||
ps_code value:GAUGE:0:9223372036854775807
|
|
||||||
ps_count processes:GAUGE:0:1000000, threads:GAUGE:0:1000000
|
|
||||||
ps_cputime user:DERIVE:0:U, syst:DERIVE:0:U
|
|
||||||
ps_data value:GAUGE:0:9223372036854775807
|
|
||||||
ps_disk_octets read:DERIVE:0:U, write:DERIVE:0:U
|
|
||||||
ps_disk_ops read:DERIVE:0:U, write:DERIVE:0:U
|
|
||||||
ps_pagefaults minflt:DERIVE:0:U, majflt:DERIVE:0:U
|
|
||||||
ps_rss value:GAUGE:0:9223372036854775807
|
|
||||||
ps_stacksize value:GAUGE:0:9223372036854775807
|
|
||||||
ps_state value:GAUGE:0:65535
|
|
||||||
ps_vm value:GAUGE:0:9223372036854775807
|
|
||||||
queue_length value:GAUGE:0:U
|
|
||||||
records value:GAUGE:0:U
|
|
||||||
requests value:GAUGE:0:U
|
|
||||||
response_time value:GAUGE:0:U
|
|
||||||
response_code value:GAUGE:0:U
|
|
||||||
route_etx value:GAUGE:0:U
|
|
||||||
route_metric value:GAUGE:0:U
|
|
||||||
routes value:GAUGE:0:U
|
|
||||||
serial_octets rx:DERIVE:0:U, tx:DERIVE:0:U
|
|
||||||
signal_noise value:GAUGE:U:0
|
|
||||||
signal_power value:GAUGE:U:0
|
|
||||||
signal_quality value:GAUGE:0:U
|
|
||||||
snr value:GAUGE:0:U
|
|
||||||
spam_check value:GAUGE:0:U
|
|
||||||
spam_score value:GAUGE:U:U
|
|
||||||
spl value:GAUGE:U:U
|
|
||||||
swap_io value:DERIVE:0:U
|
|
||||||
swap value:GAUGE:0:1099511627776
|
|
||||||
tcp_connections value:GAUGE:0:4294967295
|
|
||||||
temperature value:GAUGE:U:U
|
|
||||||
threads value:GAUGE:0:U
|
|
||||||
time_dispersion value:GAUGE:-1000000:1000000
|
|
||||||
timeleft value:GAUGE:0:U
|
|
||||||
time_offset value:GAUGE:-1000000:1000000
|
|
||||||
total_bytes value:DERIVE:0:U
|
|
||||||
total_connections value:DERIVE:0:U
|
|
||||||
total_objects value:DERIVE:0:U
|
|
||||||
total_operations value:DERIVE:0:U
|
|
||||||
total_requests value:DERIVE:0:U
|
|
||||||
total_sessions value:DERIVE:0:U
|
|
||||||
total_threads value:DERIVE:0:U
|
|
||||||
total_time_in_ms value:DERIVE:0:U
|
|
||||||
total_values value:DERIVE:0:U
|
|
||||||
uptime value:GAUGE:0:4294967295
|
|
||||||
users value:GAUGE:0:65535
|
|
||||||
vcl value:GAUGE:0:65535
|
|
||||||
vcpu value:GAUGE:0:U
|
|
||||||
virt_cpu_total value:DERIVE:0:U
|
|
||||||
virt_vcpu value:DERIVE:0:U
|
|
||||||
vmpage_action value:DERIVE:0:U
|
|
||||||
vmpage_faults minflt:DERIVE:0:U, majflt:DERIVE:0:U
|
|
||||||
vmpage_io in:DERIVE:0:U, out:DERIVE:0:U
|
|
||||||
vmpage_number value:GAUGE:0:4294967295
|
|
||||||
volatile_changes value:GAUGE:0:U
|
|
||||||
voltage_threshold value:GAUGE:U:U, threshold:GAUGE:U:U
|
|
||||||
voltage value:GAUGE:U:U
|
|
||||||
vs_memory value:GAUGE:0:9223372036854775807
|
|
||||||
vs_processes value:GAUGE:0:65535
|
|
||||||
vs_threads value:GAUGE:0:65535
|
|
||||||
#
|
|
||||||
# Legacy types
|
|
||||||
# (required for the v5 upgrade target)
|
|
||||||
#
|
|
||||||
arc_counts demand_data:COUNTER:0:U, demand_metadata:COUNTER:0:U, prefetch_data:COUNTER:0:U, prefetch_metadata:COUNTER:0:U
|
|
||||||
arc_l2_bytes read:COUNTER:0:U, write:COUNTER:0:U
|
|
||||||
arc_l2_size value:GAUGE:0:U
|
|
||||||
arc_ratio value:GAUGE:0:U
|
|
||||||
arc_size current:GAUGE:0:U, target:GAUGE:0:U, minlimit:GAUGE:0:U, maxlimit:GAUGE:0:U
|
|
||||||
mysql_qcache hits:COUNTER:0:U, inserts:COUNTER:0:U, not_cached:COUNTER:0:U, lowmem_prunes:COUNTER:0:U, queries_in_cache:GAUGE:0:U
|
|
||||||
mysql_threads running:GAUGE:0:U, connected:GAUGE:0:U, cached:GAUGE:0:U, created:COUNTER:0:U
|
|
||||||
`
|
|
|
@ -1,3 +0,0 @@
|
||||||
collectD Client
|
|
||||||
============
|
|
||||||
This directory contains code for generating collectd load.
|
|
|
@ -1,71 +0,0 @@
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"collectd.org/api"
|
|
||||||
"collectd.org/network"
|
|
||||||
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"math/rand"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var nMeasurments = flag.Int("m", 1, "Number of measurements")
|
|
||||||
var tagVariance = flag.Int("v", 1, "Number of values per tag. Client is fixed at one tag")
|
|
||||||
var rate = flag.Int("r", 1, "Number of points per second")
|
|
||||||
var total = flag.Int("t", -1, "Total number of points to send (default is no limit)")
|
|
||||||
var host = flag.String("u", "127.0.0.1:25826", "Destination host in the form host:port")
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
flag.Parse()
|
|
||||||
|
|
||||||
conn, err := network.Dial(*host, network.ClientOptions{})
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
defer conn.Close()
|
|
||||||
|
|
||||||
rateLimiter := make(chan int, *rate)
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
ticker := time.NewTicker(time.Second)
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ticker.C:
|
|
||||||
for i := 0; i < *rate; i++ {
|
|
||||||
rateLimiter <- i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
nSent := 0
|
|
||||||
for {
|
|
||||||
if nSent >= *total && *total > 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
<-rateLimiter
|
|
||||||
|
|
||||||
vl := api.ValueList{
|
|
||||||
Identifier: api.Identifier{
|
|
||||||
Host: "tagvalue" + strconv.Itoa(int(rand.Int31n(int32(*tagVariance)))),
|
|
||||||
Plugin: "golang" + strconv.Itoa(int(rand.Int31n(int32(*nMeasurments)))),
|
|
||||||
Type: "gauge",
|
|
||||||
},
|
|
||||||
Time: time.Now(),
|
|
||||||
Interval: 10 * time.Second,
|
|
||||||
Values: []api.Value{api.Gauge(42.0)},
|
|
||||||
}
|
|
||||||
if err := conn.Write(vl); err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
conn.Flush()
|
|
||||||
nSent = nSent + 1
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Println("Number of points sent:", nSent)
|
|
||||||
}
|
|
65
Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/config.go
generated
vendored
65
Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/config.go
generated
vendored
|
@ -1,65 +0,0 @@
|
||||||
package continuous_querier
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/toml"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
DefaultRecomputePreviousN = 2
|
|
||||||
|
|
||||||
DefaultRecomputeNoOlderThan = 10 * time.Minute
|
|
||||||
|
|
||||||
DefaultComputeRunsPerInterval = 10
|
|
||||||
|
|
||||||
DefaultComputeNoMoreThan = 2 * time.Minute
|
|
||||||
)
|
|
||||||
|
|
||||||
// Config represents a configuration for the continuous query service.
|
|
||||||
type Config struct {
|
|
||||||
// Enables logging in CQ service to display when CQ's are processed and how many points are wrote.
|
|
||||||
LogEnabled bool `toml:"log-enabled"`
|
|
||||||
|
|
||||||
// If this flag is set to false, both the brokers and data nodes should ignore any CQ processing.
|
|
||||||
Enabled bool `toml:"enabled"`
|
|
||||||
|
|
||||||
// when continuous queries are run we'll automatically recompute previous intervals
|
|
||||||
// in case lagged data came in. Set to zero if you never have lagged data. We do
|
|
||||||
// it this way because invalidating previously computed intervals would be insanely hard
|
|
||||||
// and expensive.
|
|
||||||
RecomputePreviousN int `toml:"recompute-previous-n"`
|
|
||||||
|
|
||||||
// The RecomputePreviousN setting provides guidance for how far back to recompute, the RecomputeNoOlderThan
|
|
||||||
// setting sets a ceiling on how far back in time it will go. For example, if you have 2 PreviousN
|
|
||||||
// and have this set to 10m, then we'd only compute the previous two intervals for any
|
|
||||||
// CQs that have a group by time <= 5m. For all others, we'd only recompute the previous window
|
|
||||||
RecomputeNoOlderThan toml.Duration `toml:"recompute-no-older-than"`
|
|
||||||
|
|
||||||
// ComputeRunsPerInterval will determine how many times the current and previous N intervals
|
|
||||||
// will be computed. The group by time will be divided by this and it will get computed this many times:
|
|
||||||
// group by time seconds / runs per interval
|
|
||||||
// This will give partial results for current group by intervals and will determine how long it will
|
|
||||||
// be until lagged data is recomputed. For example, if this number is 10 and the group by time is 10m, it
|
|
||||||
// will be a minute past the previous 10m bucket of time before lagged data is picked up
|
|
||||||
ComputeRunsPerInterval int `toml:"compute-runs-per-interval"`
|
|
||||||
|
|
||||||
// ComputeNoMoreThan paired with the RunsPerInterval will determine the ceiling of how many times smaller
|
|
||||||
// group by times will be computed. For example, if you have RunsPerInterval set to 10 and this setting
|
|
||||||
// to 1m. Then for a group by time(1m) will actually only get computed once per interval (and once per PreviousN).
|
|
||||||
// If you have a group by time(5m) then you'll get five computes per interval. Any group by time window larger
|
|
||||||
// than 10m will get computed 10 times for each interval.
|
|
||||||
ComputeNoMoreThan toml.Duration `toml:"compute-no-more-than"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewConfig returns a new instance of Config with defaults.
|
|
||||||
func NewConfig() Config {
|
|
||||||
return Config{
|
|
||||||
LogEnabled: true,
|
|
||||||
Enabled: true,
|
|
||||||
RecomputePreviousN: DefaultRecomputePreviousN,
|
|
||||||
RecomputeNoOlderThan: toml.Duration(DefaultRecomputeNoOlderThan),
|
|
||||||
ComputeRunsPerInterval: DefaultComputeRunsPerInterval,
|
|
||||||
ComputeNoMoreThan: toml.Duration(DefaultComputeNoMoreThan),
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,36 +0,0 @@
|
||||||
package continuous_querier_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/BurntSushi/toml"
|
|
||||||
"github.com/influxdb/influxdb/services/continuous_querier"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestConfig_Parse(t *testing.T) {
|
|
||||||
// Parse configuration.
|
|
||||||
var c continuous_querier.Config
|
|
||||||
if _, err := toml.Decode(`
|
|
||||||
recompute-previous-n = 1
|
|
||||||
recompute-no-older-than = "10s"
|
|
||||||
compute-runs-per-interval = 2
|
|
||||||
compute-no-more-than = "20s"
|
|
||||||
enabled = true
|
|
||||||
`, &c); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate configuration.
|
|
||||||
if c.RecomputePreviousN != 1 {
|
|
||||||
t.Fatalf("unexpected recompute previous n: %d", c.RecomputePreviousN)
|
|
||||||
} else if time.Duration(c.RecomputeNoOlderThan) != 10*time.Second {
|
|
||||||
t.Fatalf("unexpected recompute no older than: %v", c.RecomputeNoOlderThan)
|
|
||||||
} else if c.ComputeRunsPerInterval != 2 {
|
|
||||||
t.Fatalf("unexpected compute runs per interval: %d", c.ComputeRunsPerInterval)
|
|
||||||
} else if time.Duration(c.ComputeNoMoreThan) != 20*time.Second {
|
|
||||||
t.Fatalf("unexpected compute no more than: %v", c.ComputeNoMoreThan)
|
|
||||||
} else if c.Enabled != true {
|
|
||||||
t.Fatalf("unexpected enabled: %v", c.Enabled)
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,236 +0,0 @@
|
||||||
# Continuous Queries
|
|
||||||
|
|
||||||
This document lays out continuous queries and a proposed architecture for how they'll work within an InfluxDB cluster.
|
|
||||||
|
|
||||||
## Definition of Continuous Queries
|
|
||||||
|
|
||||||
Continuous queries serve two purposes in InfluxDB:
|
|
||||||
|
|
||||||
1. Combining many series into a single series (i.e. removing 1 or more tag dimensions to make queries more efficient)
|
|
||||||
2. Aggregating and downsampling series
|
|
||||||
|
|
||||||
The purpose of both types of continuous queries is to duplicate or downsample data automatically in the background to make querying thier results fast and efficient. Think of them as another way to create indexes on data.
|
|
||||||
|
|
||||||
Generally, there are continuous queries that create copyies of data into another measurement or tagset and queries that downsample and aggregate data. The only difference between the two types is if the query has a `GROUP BY time` clause.
|
|
||||||
|
|
||||||
Before we get to the continuous query examples, we need to define the `INTO` syntax of queries.
|
|
||||||
|
|
||||||
### INTO
|
|
||||||
|
|
||||||
`INTO` is a method for running a query and having it output into either another measurement name, retention policy, or database. The syntax looks like this:
|
|
||||||
|
|
||||||
```sql
|
|
||||||
SELECT *
|
|
||||||
INTO [<retention policy>.]<measurement> [ON <database>]
|
|
||||||
FROM <measurement>
|
|
||||||
[WHERE ...]
|
|
||||||
[GROUP BY ...]
|
|
||||||
```
|
|
||||||
|
|
||||||
The syntax states that the retention policy, database, where clause, and group by clause are all optional. If a retention policy isn't specified, the database's default retention policy will be written into. If the database isn't specified, the database the query is running from will be written into.
|
|
||||||
|
|
||||||
By selecting specific fields, `INTO` can merge many series into one that will go into a new either a new measurement, retention policy, or database. For example:
|
|
||||||
|
|
||||||
```sql
|
|
||||||
SELECT mean(value) as value, region
|
|
||||||
INTO "1h.cpu_load"
|
|
||||||
FROM cpu_load
|
|
||||||
GROUP BY time(1h), region
|
|
||||||
```
|
|
||||||
|
|
||||||
That will give 1h summaries of the mean value of the `cpu_load` for each `region`. Specifying `region` in the `GROUP BY` clause is unnecessary since having it in the `SELECT` clause forces it to be grouped by that tag, we've just included it in the example for clarity.
|
|
||||||
|
|
||||||
With `SELECT ... INTO`, fields will be written as fields and tags will be written as tags.
|
|
||||||
|
|
||||||
### Continuous Query Syntax
|
|
||||||
|
|
||||||
The `INTO` queries run once. Continuous queries will turn `INTO` queries into something that run in the background in the cluster. They're kind of like triggers in SQL.
|
|
||||||
|
|
||||||
```sql
|
|
||||||
CREATE CONTINUOUS QUERY "1h_cpu_load"
|
|
||||||
ON database_name
|
|
||||||
BEGIN
|
|
||||||
SELECT mean(value) as value, region
|
|
||||||
INTO "1h.cpu_load"
|
|
||||||
FROM cpu_load
|
|
||||||
GROUP BY time(1h), region
|
|
||||||
END
|
|
||||||
```
|
|
||||||
|
|
||||||
Or chain them together:
|
|
||||||
|
|
||||||
```sql
|
|
||||||
CREATE CONTINUOUS QUERY "10m_event_count"
|
|
||||||
ON database_name
|
|
||||||
BEGIN
|
|
||||||
SELECT count(value)
|
|
||||||
INTO "10m.events"
|
|
||||||
FROM events
|
|
||||||
GROUP BY time(10m)
|
|
||||||
END
|
|
||||||
|
|
||||||
-- this selects from the output of one continuous query and outputs to another series
|
|
||||||
CREATE CONTINUOUS QUERY "1h_event_count"
|
|
||||||
ON database_name
|
|
||||||
BEGIN
|
|
||||||
SELECT sum(count) as count
|
|
||||||
INTO "1h.events"
|
|
||||||
FROM events
|
|
||||||
GROUP BY time(1h)
|
|
||||||
END
|
|
||||||
```
|
|
||||||
|
|
||||||
Or multiple aggregations from all series in a measurement. This example assumes you have a retention policy named `1h`.
|
|
||||||
|
|
||||||
```sql
|
|
||||||
CREATE CONTINUOUS QUERY "1h_cpu_load"
|
|
||||||
ON database_name
|
|
||||||
BEGIN
|
|
||||||
SELECT mean(value), percentile(80, value) as percentile_80, percentile(95, value) as percentile_95
|
|
||||||
INTO "1h.cpu_load"
|
|
||||||
FROM cpu_load
|
|
||||||
GROUP BY time(1h), *
|
|
||||||
END
|
|
||||||
```
|
|
||||||
|
|
||||||
The `GROUP BY *` indicates that we want to group by the tagset of the points written in. The same tags will be written to the output series. The multiple aggregates in the `SELECT` clause (percentile, mean) will be written in as fields to the resulting series.
|
|
||||||
|
|
||||||
Showing what continuous queries we have:
|
|
||||||
|
|
||||||
```sql
|
|
||||||
LIST CONTINUOUS QUERIES
|
|
||||||
```
|
|
||||||
|
|
||||||
Dropping continuous queries:
|
|
||||||
|
|
||||||
```sql
|
|
||||||
DROP CONTINUOUS QUERY <name>
|
|
||||||
ON <database>
|
|
||||||
```
|
|
||||||
|
|
||||||
### Security
|
|
||||||
|
|
||||||
To create or drop a continuous query, the user must be an admin.
|
|
||||||
|
|
||||||
### Limitations
|
|
||||||
|
|
||||||
In order to prevent cycles and endless copying of data, the following limitation is enforced on continuous queries at create time:
|
|
||||||
|
|
||||||
*The output of a continuous query must go to either a different measurement or to a different retention policy.*
|
|
||||||
|
|
||||||
In theory they'd still be able to create a cycle with multiple continuous queries. We should check for these and disallow.
|
|
||||||
|
|
||||||
## Proposed Architecture
|
|
||||||
|
|
||||||
Continuous queries should be stored in the metastore cluster wide. That is, they amount to database schema that should be stored in every server in a cluster.
|
|
||||||
|
|
||||||
Continuous queries will have to be handled in a different way for two different use cases: those that simply copy data (CQs without a group by time) and those that aggregate and downsample data (those with a group by time).
|
|
||||||
|
|
||||||
### No group by time
|
|
||||||
|
|
||||||
For CQs that have no `GROUP BY time` clause, they should be evaluated at the data node as part of the write. The single write should create any other writes for the CQ and submit those in the same request to the brokers to ensure that all writes succeed (both the original and the new CQ writes) or none do.
|
|
||||||
|
|
||||||
I imagine the process going something like this:
|
|
||||||
|
|
||||||
1. Convert the data point into its compact form `<series id><time><values>`
|
|
||||||
2. For each CQ on the measurement and retention policy without a group by time:
|
|
||||||
3. Run the data point through a special query engine that will output 0 or 1 data point
|
|
||||||
4. Goto #1 for each newly generated data point
|
|
||||||
5. Write all the data points in a single call to the brokers
|
|
||||||
6. Return success to the user
|
|
||||||
|
|
||||||
Note that for the generated data points, we need to go through and run this process against them since they can feed into different retention policies, measurements, and new tagsets. On #3 I mention that the output will either be a data point or not. That's because of `WHERE` clauses on the query. However, it will never be more than a single data point.
|
|
||||||
|
|
||||||
I mention that we'll need a special query engine for these types of queries. In this case, they never have an aggregate function. Any query with an aggregate function also has a group by time, and these queries by definition don't have that.
|
|
||||||
|
|
||||||
The only thing we have to worry about is which fields are being selected, and what the where clause looks like. We should be able to put the raw data point through a simple transform function that either outputs another raw points or doesn't.
|
|
||||||
|
|
||||||
I think this transform function be something separate from the regular query planner and engine. It can be in `influxQL` but it should be something fairly simply since the only purpose of these types of queries is to either filter some data out and output to a new series or transform into a new series by dropping tags.
|
|
||||||
|
|
||||||
### Has group by time
|
|
||||||
|
|
||||||
CQs that have a `GROUP BY time` (or aggregate CQs) will need to be handled differently.
|
|
||||||
|
|
||||||
One key point on continuous queries with a group by time is that all their writes should always be `overwrite = true`. That is, they should only have a single data point for each timestamp. This distinction means that continuous queries for previous blocks of time can be safely run multiple times without duplicating data (i.e. they're idempotent).
|
|
||||||
|
|
||||||
There are two different ideas I have for how CQs with group by time could be handled. The first is through periodic updates handled by the Raft Leader. The second would be to expand out writes for each CQ and handle them on the data node.
|
|
||||||
|
|
||||||
#### Periodic Updates
|
|
||||||
|
|
||||||
In this approach the management of how CQs run in a cluster will be centrally located on the Raft Leader. It will be responsible for orchestrating which data nodes run CQs and when.
|
|
||||||
|
|
||||||
The naive approach would be to have the leader hand out each CQ for a block of time periodically. The leader could also rerun CQ for periods of time that have recently passed. This would be an easy way to handle the "lagging data" problem, but it's not precise.
|
|
||||||
|
|
||||||
Unfortunately, there's no easy way to tell cluster wide if there were data points written in an already passed window of time for a CQ. We might be able to add this at the data nodes and have them track it, but it would be quite a bit more work.
|
|
||||||
|
|
||||||
The easy way would just be to have CQs re-execute for periods that recently passed and have some user-configurable window of time that they stop checking after. Then we could give the user the ability to recalculate CQs ranges of time if they need to correct for some problem that occurred or the loading of a bunch of historical data.
|
|
||||||
|
|
||||||
With this approach, we'd have the metadata in the database store the last time each CQ was run. Whenever the Raft leader sent out a command to a data node to handle a CQ, the data node would use this metadata to determine which windows of time it should compute.
|
|
||||||
|
|
||||||
This approach is like what exists in 0.8, with the exception that it will automatically catch data that is lagged behind in a small window of time and give the user the ability to force recalculation.
|
|
||||||
|
|
||||||
#### Expanding writes
|
|
||||||
|
|
||||||
When a write comes into a data node, we could have it evaluated against group by CQs in addition to the non-group by ones. It would then create writes that would then go through the brokers. When the CQ writes arrive at the data nodes, they would have to handle each write differently depending on if it was a write to a raw series or if it was a CQ write.
|
|
||||||
|
|
||||||
Let's lay out a concrete example.
|
|
||||||
|
|
||||||
```sql
|
|
||||||
CREATE CONTINUOUS QUERY "10m_cpu_by_region"
|
|
||||||
ON foo
|
|
||||||
BEGIN
|
|
||||||
SELECT mean(value)
|
|
||||||
INTO cpu_by_region
|
|
||||||
FROM cpu
|
|
||||||
GROUP BY time(10m), region
|
|
||||||
END
|
|
||||||
```
|
|
||||||
|
|
||||||
In this example we write values into `cpu` with the tags `region` and `host`.
|
|
||||||
|
|
||||||
Here's another example CQ:
|
|
||||||
|
|
||||||
```sql
|
|
||||||
CREATE CONTINUOUS QUERY "1h_cpu"
|
|
||||||
ON foo
|
|
||||||
BEGIN
|
|
||||||
SELECT mean(value)
|
|
||||||
INTO "1h.cpu"
|
|
||||||
FROM raw.cpu
|
|
||||||
GROUP BY time(10m), *
|
|
||||||
END
|
|
||||||
```
|
|
||||||
|
|
||||||
That would output one series into the `1h` retention policy for the `cpu` measurement for every series from the `raw` retention policy and the `cpu` measurement.
|
|
||||||
|
|
||||||
Both of these examples would be handled the same way despite one being a big merge of a bunch of series into one and the other being an aggregation of series in a 1-to-1 mapping.
|
|
||||||
|
|
||||||
Say we're collecting data for two hosts in a single region. Then we'd have two distinct series like this:
|
|
||||||
|
|
||||||
```
|
|
||||||
1 - cpu host=serverA region=uswest
|
|
||||||
2 - cpu host=serverB region=uswest
|
|
||||||
```
|
|
||||||
|
|
||||||
Whenever a write came into a server, we'd look at the continuous queries and see if we needed to create new writes. If we had the two CQ examples above, we'd have to expand a single write into two more writes (one for each CQ).
|
|
||||||
|
|
||||||
The first CQ would have to create a new series:
|
|
||||||
|
|
||||||
```
|
|
||||||
3 - cpu_by_region region=uswest
|
|
||||||
```
|
|
||||||
|
|
||||||
The second CQ would use the same series id as the write, but would send it to another retention policy (and thus shard).
|
|
||||||
|
|
||||||
We'd need to keep track of which series + retention policy combinations were the result of a CQ. When the data nodes get writes replicated downward, they would have to handle them like this:
|
|
||||||
|
|
||||||
1. If write is normal, write through
|
|
||||||
2. If write is CQ write, compute based on existing values, write to DB
|
|
||||||
|
|
||||||
#### Approach tradeoffs
|
|
||||||
|
|
||||||
The first approach of periodically running queries would almost certainly be the easiest to implement quickly. It also has the added advantage of not putting additional load on the brokers by ballooning up the number of writes that go through the system.
|
|
||||||
|
|
||||||
The second approach is appealing because it would be accurate regardless of when writes come in. However, it would take more work and would cause the number of writes going through the brokers to be multiplied by the number of continuous queries, which might not scale to where we need it to.
|
|
||||||
|
|
||||||
Also, if the data nodes write for every single update, the load on the underlying storage engine would go up significantly as well.
|
|
458
Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/service.go
generated
vendored
458
Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/service.go
generated
vendored
|
@ -1,458 +0,0 @@
|
||||||
package continuous_querier
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/cluster"
|
|
||||||
"github.com/influxdb/influxdb/influxql"
|
|
||||||
"github.com/influxdb/influxdb/meta"
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// NoChunkingSize, when passed as the chunk size while planning a SELECT
	// statement, tells the planner not to chunk results. Only applies to raw queries.
	NoChunkingSize = 0
)
|
|
||||||
|
|
||||||
// ContinuousQuerier represents a service that executes continuous queries.
type ContinuousQuerier interface {
	// Run executes the named query in the named database. Blank database or name matches all.
	Run(database, name string) error
}
|
|
||||||
|
|
||||||
// queryExecutor is an internal interface to make testing easier.
type queryExecutor interface {
	// ExecuteQuery runs query against database, streaming results on the returned channel.
	ExecuteQuery(query *influxql.Query, database string, chunkSize int) (<-chan *influxql.Result, error)
}
|
|
||||||
|
|
||||||
// metaStore is an internal interface to make testing easier.
type metaStore interface {
	// IsLeader reports whether this node is the cluster leader.
	IsLeader() bool
	// Databases returns info for every database in the cluster.
	Databases() ([]meta.DatabaseInfo, error)
	// Database returns info for a single database by name.
	Database(name string) (*meta.DatabaseInfo, error)
}
|
|
||||||
|
|
||||||
// pointsWriter is an internal interface to make testing easier.
type pointsWriter interface {
	// WritePoints writes a batch of points back into the cluster.
	WritePoints(p *cluster.WritePointsRequest) error
}
|
|
||||||
|
|
||||||
// Service manages continuous query execution.
type Service struct {
	MetaStore     metaStore      // cluster metadata access (leadership, databases, CQs)
	QueryExecutor queryExecutor  // executes the CQ's inner SELECT
	PointsWriter  pointsWriter   // writes CQ results back into the cluster
	Config        *Config
	RunInterval   time.Duration // how often the background loop wakes up
	// RunCh can be used by clients to signal service to run CQs.
	RunCh          chan struct{}
	Logger         *log.Logger
	loggingEnabled bool
	// lastRuns maps CQ name to last time it was run.
	// NOTE(review): written by Run (caller goroutine) and read/written by the
	// background goroutine with no lock — TODO confirm callers serialize access.
	lastRuns map[string]time.Time
	stop     chan struct{} // closed by Close to terminate the background loop; nil when not open
	wg       *sync.WaitGroup
}
|
|
||||||
|
|
||||||
// NewService returns a new instance of Service.
|
|
||||||
func NewService(c Config) *Service {
|
|
||||||
s := &Service{
|
|
||||||
Config: &c,
|
|
||||||
RunInterval: time.Second,
|
|
||||||
RunCh: make(chan struct{}),
|
|
||||||
loggingEnabled: c.LogEnabled,
|
|
||||||
Logger: log.New(os.Stderr, "[continuous_querier] ", log.LstdFlags),
|
|
||||||
lastRuns: map[string]time.Time{},
|
|
||||||
}
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open starts the service.
|
|
||||||
func (s *Service) Open() error {
|
|
||||||
|
|
||||||
s.Logger.Println("Starting continuous query service")
|
|
||||||
|
|
||||||
if s.stop != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
assert(s.MetaStore != nil, "MetaStore is nil")
|
|
||||||
assert(s.QueryExecutor != nil, "QueryExecutor is nil")
|
|
||||||
assert(s.PointsWriter != nil, "PointsWriter is nil")
|
|
||||||
|
|
||||||
s.stop = make(chan struct{})
|
|
||||||
s.wg = &sync.WaitGroup{}
|
|
||||||
s.wg.Add(1)
|
|
||||||
go s.backgroundLoop()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close stops the service.
|
|
||||||
func (s *Service) Close() error {
|
|
||||||
if s.stop == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
close(s.stop)
|
|
||||||
s.wg.Wait()
|
|
||||||
s.wg = nil
|
|
||||||
s.stop = nil
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetLogger sets the internal logger to the logger passed in.
func (s *Service) SetLogger(l *log.Logger) {
	s.Logger = l
}
|
|
||||||
|
|
||||||
// Run runs the specified continuous query, or all CQs if none is specified.
|
|
||||||
func (s *Service) Run(database, name string) error {
|
|
||||||
var dbs []meta.DatabaseInfo
|
|
||||||
|
|
||||||
if database != "" {
|
|
||||||
// Find the requested database.
|
|
||||||
db, err := s.MetaStore.Database(database)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if db == nil {
|
|
||||||
return tsdb.ErrDatabaseNotFound(database)
|
|
||||||
}
|
|
||||||
dbs = append(dbs, *db)
|
|
||||||
} else {
|
|
||||||
// Get all databases.
|
|
||||||
var err error
|
|
||||||
dbs, err = s.MetaStore.Databases()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Loop through databases.
|
|
||||||
for _, db := range dbs {
|
|
||||||
// Loop through CQs in each DB executing the ones that match name.
|
|
||||||
for _, cq := range db.ContinuousQueries {
|
|
||||||
if name == "" || cq.Name == name {
|
|
||||||
// Reset the last run time for the CQ.
|
|
||||||
s.lastRuns[cq.Name] = time.Time{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Signal the background routine to run CQs.
|
|
||||||
s.RunCh <- struct{}{}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// backgroundLoop runs on a go routine and periodically executes CQs.
// It exits when s.stop is closed (by Close).
func (s *Service) backgroundLoop() {
	defer s.wg.Done()
	for {
		select {
		case <-s.stop:
			// Close was called; shut down.
			s.Logger.Println("continuous query service terminating")
			return
		case <-s.RunCh:
			// Explicit request via Run; only the cluster leader executes CQs.
			if s.MetaStore.IsLeader() {
				s.Logger.Print("running continuous queries by request")
				s.runContinuousQueries()
			}
		case <-time.After(s.RunInterval):
			// Periodic tick; only the cluster leader executes CQs.
			if s.MetaStore.IsLeader() {
				s.runContinuousQueries()
			}
		}
	}
}
|
|
||||||
|
|
||||||
// runContinuousQueries gets CQs from the meta store and runs them.
|
|
||||||
func (s *Service) runContinuousQueries() {
|
|
||||||
// Get list of all databases.
|
|
||||||
dbs, err := s.MetaStore.Databases()
|
|
||||||
if err != nil {
|
|
||||||
s.Logger.Println("error getting databases")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Loop through all databases executing CQs.
|
|
||||||
for _, db := range dbs {
|
|
||||||
// TODO: distribute across nodes
|
|
||||||
for _, cq := range db.ContinuousQueries {
|
|
||||||
if err := s.ExecuteContinuousQuery(&db, &cq); err != nil {
|
|
||||||
s.Logger.Printf("error executing query: %s: err = %s", cq.Query, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExecuteContinuousQuery executes a single CQ: it decides whether the CQ is
// due, runs it for the most recent group-by window, and then optionally
// recomputes up to Config.RecomputePreviousN earlier windows to pick up
// late-arriving data. Returns nil when the CQ is simply not due yet.
func (s *Service) ExecuteContinuousQuery(dbi *meta.DatabaseInfo, cqi *meta.ContinuousQueryInfo) error {
	// TODO: re-enable stats
	//s.stats.Inc("continuousQueryExecuted")

	// Local wrapper / helper.
	cq, err := NewContinuousQuery(dbi.Name, cqi)
	if err != nil {
		return err
	}

	// Get the last time this CQ was run from the service's cache.
	cq.LastRun = s.lastRuns[cqi.Name]

	// Set the retention policy to default if it wasn't specified in the query.
	if cq.intoRP() == "" {
		cq.setIntoRP(dbi.DefaultRetentionPolicy)
	}

	// See if this query needs to be run.
	computeNoMoreThan := time.Duration(s.Config.ComputeNoMoreThan)
	run, err := cq.shouldRunContinuousQuery(s.Config.ComputeRunsPerInterval, computeNoMoreThan)
	if err != nil {
		return err
	} else if !run {
		return nil
	}

	// We're about to run the query so store the time.
	now := time.Now()
	cq.LastRun = now
	s.lastRuns[cqi.Name] = now

	// Get the group by interval.
	interval, err := cq.q.GroupByInterval()
	if err != nil {
		return err
	} else if interval == 0 {
		// No GROUP BY time: nothing to schedule here.
		return nil
	}

	// Calculate and set the time range for the query.
	startTime := now.Round(interval)
	if startTime.UnixNano() > now.UnixNano() {
		// Round rounded up past "now"; step back one interval so the
		// window ends at or before the current time.
		startTime = startTime.Add(-interval)
	}

	// NOTE(review): a SetTimeRange failure here is logged but the query is
	// still executed — TODO confirm this is intentional.
	if err := cq.q.SetTimeRange(startTime, startTime.Add(interval)); err != nil {
		s.Logger.Printf("error setting time range: %s\n", err)
	}

	if s.loggingEnabled {
		s.Logger.Printf("executing continuous query %s", cq.Info.Name)
	}

	// Do the actual processing of the query & writing of results.
	if err := s.runContinuousQueryAndWriteResult(cq); err != nil {
		s.Logger.Printf("error: %s. running: %s\n", err, cq.q.String())
		return err
	}

	recomputeNoOlderThan := time.Duration(s.Config.RecomputeNoOlderThan)

	// Recompute earlier windows, walking backwards one interval at a time,
	// to catch data that arrived after those windows were first computed.
	for i := 0; i < s.Config.RecomputePreviousN; i++ {
		// if we're already more time past the previous window than we're going to look back, stop
		if now.Sub(startTime) > recomputeNoOlderThan {
			return nil
		}
		newStartTime := startTime.Add(-interval)

		if err := cq.q.SetTimeRange(newStartTime, startTime); err != nil {
			s.Logger.Printf("error setting time range: %s\n", err)
			return err
		}

		if err := s.runContinuousQueryAndWriteResult(cq); err != nil {
			s.Logger.Printf("error during recompute previous: %s. running: %s\n", err, cq.q.String())
			return err
		}

		startTime = newStartTime
	}
	return nil
}
|
|
||||||
|
|
||||||
// runContinuousQueryAndWriteResult will run the query against the cluster and write the results back in
|
|
||||||
func (s *Service) runContinuousQueryAndWriteResult(cq *ContinuousQuery) error {
|
|
||||||
// Wrap the CQ's inner SELECT statement in a Query for the QueryExecutor.
|
|
||||||
q := &influxql.Query{
|
|
||||||
Statements: influxql.Statements{cq.q},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Execute the SELECT.
|
|
||||||
ch, err := s.QueryExecutor.ExecuteQuery(q, cq.Database, NoChunkingSize)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Drain results
|
|
||||||
defer func() {
|
|
||||||
for _ = range ch {
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Read all rows from the result channel.
|
|
||||||
points := make([]tsdb.Point, 0, 100)
|
|
||||||
for result := range ch {
|
|
||||||
if result.Err != nil {
|
|
||||||
return result.Err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, row := range result.Series {
|
|
||||||
// Convert the result row to points.
|
|
||||||
part, err := s.convertRowToPoints(cq.intoMeasurement(), row)
|
|
||||||
if err != nil {
|
|
||||||
log.Println(err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(part) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the points have any nil values, can't write.
|
|
||||||
// This happens if the CQ is created and running before data is written to the measurement.
|
|
||||||
for _, p := range part {
|
|
||||||
fields := p.Fields()
|
|
||||||
for _, v := range fields {
|
|
||||||
if v == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
points = append(points, part...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(points) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a write request for the points.
|
|
||||||
req := &cluster.WritePointsRequest{
|
|
||||||
Database: cq.intoDB(),
|
|
||||||
RetentionPolicy: cq.intoRP(),
|
|
||||||
ConsistencyLevel: cluster.ConsistencyLevelAny,
|
|
||||||
Points: points,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write the request.
|
|
||||||
if err := s.PointsWriter.WritePoints(req); err != nil {
|
|
||||||
s.Logger.Println(err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if s.loggingEnabled {
|
|
||||||
s.Logger.Printf("wrote %d point(s) to %s.%s.%s", len(points), cq.intoDB(), cq.intoRP(), cq.Info.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// convertRowToPoints will convert a query result Row into Points that can be written back in.
|
|
||||||
// Used for continuous and INTO queries
|
|
||||||
func (s *Service) convertRowToPoints(measurementName string, row *influxql.Row) ([]tsdb.Point, error) {
|
|
||||||
// figure out which parts of the result are the time and which are the fields
|
|
||||||
timeIndex := -1
|
|
||||||
fieldIndexes := make(map[string]int)
|
|
||||||
for i, c := range row.Columns {
|
|
||||||
if c == "time" {
|
|
||||||
timeIndex = i
|
|
||||||
} else {
|
|
||||||
fieldIndexes[c] = i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if timeIndex == -1 {
|
|
||||||
return nil, errors.New("error finding time index in result")
|
|
||||||
}
|
|
||||||
|
|
||||||
points := make([]tsdb.Point, 0, len(row.Values))
|
|
||||||
for _, v := range row.Values {
|
|
||||||
vals := make(map[string]interface{})
|
|
||||||
for fieldName, fieldIndex := range fieldIndexes {
|
|
||||||
vals[fieldName] = v[fieldIndex]
|
|
||||||
}
|
|
||||||
|
|
||||||
p := tsdb.NewPoint(measurementName, row.Tags, vals, v[timeIndex].(time.Time))
|
|
||||||
|
|
||||||
points = append(points, p)
|
|
||||||
}
|
|
||||||
|
|
||||||
return points, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContinuousQuery is a local wrapper / helper around continuous queries.
type ContinuousQuery struct {
	Database string                    // database the CQ is defined on
	Info     *meta.ContinuousQueryInfo // raw CQ metadata from the meta store
	LastRun  time.Time                 // last time this CQ was executed
	q        *influxql.SelectStatement // parsed inner SELECT of the CQ
}
|
|
||||||
|
|
||||||
// intoDB returns the destination database of the CQ's INTO clause.
func (cq *ContinuousQuery) intoDB() string { return cq.q.Target.Measurement.Database }

// intoRP returns the destination retention policy of the CQ's INTO clause.
func (cq *ContinuousQuery) intoRP() string { return cq.q.Target.Measurement.RetentionPolicy }

// setIntoRP overrides the destination retention policy.
func (cq *ContinuousQuery) setIntoRP(rp string) { cq.q.Target.Measurement.RetentionPolicy = rp }

// intoMeasurement returns the destination measurement name.
func (cq *ContinuousQuery) intoMeasurement() string { return cq.q.Target.Measurement.Name }
|
|
||||||
|
|
||||||
// NewContinuousQuery returns a ContinuousQuery object with a parsed influxql.CreateContinuousQueryStatement
|
|
||||||
func NewContinuousQuery(database string, cqi *meta.ContinuousQueryInfo) (*ContinuousQuery, error) {
|
|
||||||
stmt, err := influxql.NewParser(strings.NewReader(cqi.Query)).ParseStatement()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
q, ok := stmt.(*influxql.CreateContinuousQueryStatement)
|
|
||||||
if !ok || q.Source.Target == nil || q.Source.Target.Measurement == nil {
|
|
||||||
return nil, errors.New("query isn't a valid continuous query")
|
|
||||||
}
|
|
||||||
|
|
||||||
cquery := &ContinuousQuery{
|
|
||||||
Database: database,
|
|
||||||
Info: cqi,
|
|
||||||
q: q.Source,
|
|
||||||
}
|
|
||||||
|
|
||||||
return cquery, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// shouldRunContinuousQuery returns true if the CQ should be schedule to run. It will use the
|
|
||||||
// lastRunTime of the CQ and the rules for when to run set through the config to determine
|
|
||||||
// if this CQ should be run
|
|
||||||
func (cq *ContinuousQuery) shouldRunContinuousQuery(runsPerInterval int, noMoreThan time.Duration) (bool, error) {
|
|
||||||
// if it's not aggregated we don't run it
|
|
||||||
if cq.q.IsRawQuery {
|
|
||||||
return false, errors.New("continuous queries must be aggregate queries")
|
|
||||||
}
|
|
||||||
|
|
||||||
// since it's aggregated we need to figure how often it should be run
|
|
||||||
interval, err := cq.q.GroupByInterval()
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// determine how often we should run this continuous query.
|
|
||||||
// group by time / the number of times to compute
|
|
||||||
computeEvery := time.Duration(interval.Nanoseconds()/int64(runsPerInterval)) * time.Nanosecond
|
|
||||||
// make sure we're running no more frequently than the setting in the config
|
|
||||||
if computeEvery < noMoreThan {
|
|
||||||
computeEvery = noMoreThan
|
|
||||||
}
|
|
||||||
|
|
||||||
// if we've passed the amount of time since the last run, do it up
|
|
||||||
if cq.LastRun.Add(computeEvery).UnixNano() <= time.Now().UnixNano() {
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// assert will panic with a given formatted message if the given condition is false.
func assert(condition bool, msg string, v ...interface{}) {
	if condition {
		return
	}
	panic(fmt.Sprintf("assert failed: "+msg, v...))
}
|
|
|
@ -1,503 +0,0 @@
|
||||||
package continuous_querier
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"sync"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/cluster"
|
|
||||||
"github.com/influxdb/influxdb/influxql"
|
|
||||||
"github.com/influxdb/influxdb/meta"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
	// expectedErr is injected by tests that deliberately cause a failure.
	expectedErr = errors.New("expected error")
	// unexpectedErr is returned from mock paths a test expects never to execute.
	unexpectedErr = errors.New("unexpected error")
)
|
|
||||||
|
|
||||||
// Test closing never opened, open, open already open, close, and close already closed.
|
|
||||||
func TestOpenAndClose(t *testing.T) {
|
|
||||||
s := NewTestService(t)
|
|
||||||
|
|
||||||
if err := s.Close(); err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
} else if err = s.Open(); err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
} else if err = s.Open(); err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
} else if err = s.Close(); err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
} else if err = s.Close(); err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test ExecuteContinuousQuery happy path.
|
|
||||||
func TestExecuteContinuousQuery_HappyPath(t *testing.T) {
|
|
||||||
s := NewTestService(t)
|
|
||||||
dbis, _ := s.MetaStore.Databases()
|
|
||||||
dbi := dbis[0]
|
|
||||||
cqi := dbi.ContinuousQueries[0]
|
|
||||||
|
|
||||||
pointCnt := 100
|
|
||||||
qe := s.QueryExecutor.(*QueryExecutor)
|
|
||||||
qe.Results = []*influxql.Result{genResult(1, pointCnt)}
|
|
||||||
|
|
||||||
pw := s.PointsWriter.(*PointsWriter)
|
|
||||||
pw.WritePointsFn = func(p *cluster.WritePointsRequest) error {
|
|
||||||
if len(p.Points) != pointCnt {
|
|
||||||
return fmt.Errorf("exp = %d, got = %d", pointCnt, len(p.Points))
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
err := s.ExecuteContinuousQuery(&dbi, &cqi)
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test the service happy path.
|
|
||||||
func TestService_HappyPath(t *testing.T) {
|
|
||||||
s := NewTestService(t)
|
|
||||||
|
|
||||||
pointCnt := 100
|
|
||||||
qe := s.QueryExecutor.(*QueryExecutor)
|
|
||||||
qe.Results = []*influxql.Result{genResult(1, pointCnt)}
|
|
||||||
|
|
||||||
pw := s.PointsWriter.(*PointsWriter)
|
|
||||||
ch := make(chan int, 5)
|
|
||||||
defer close(ch)
|
|
||||||
pw.WritePointsFn = func(p *cluster.WritePointsRequest) error {
|
|
||||||
ch <- len(p.Points)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
s.Open()
|
|
||||||
if cnt, err := waitInt(ch, time.Second); err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
} else if cnt != pointCnt {
|
|
||||||
t.Errorf("exp = %d, got = %d", pointCnt, cnt)
|
|
||||||
}
|
|
||||||
s.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test Run method.
|
|
||||||
func TestService_Run(t *testing.T) {
|
|
||||||
s := NewTestService(t)
|
|
||||||
|
|
||||||
// Set RunInterval high so we can trigger using Run method.
|
|
||||||
s.RunInterval = 10 * time.Minute
|
|
||||||
|
|
||||||
// Only want one call to ExecuteQueryFn per CQ.
|
|
||||||
s.Config.RecomputePreviousN = 0
|
|
||||||
|
|
||||||
done := make(chan struct{})
|
|
||||||
expectCallCnt := 2
|
|
||||||
callCnt := 0
|
|
||||||
|
|
||||||
// Set a callback for ExecuteQuery.
|
|
||||||
qe := s.QueryExecutor.(*QueryExecutor)
|
|
||||||
qe.ExecuteQueryFn = func(query *influxql.Query, database string, chunkSize int) (<-chan *influxql.Result, error) {
|
|
||||||
callCnt++
|
|
||||||
if callCnt >= expectCallCnt {
|
|
||||||
done <- struct{}{}
|
|
||||||
}
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
s.Open()
|
|
||||||
// Trigger service to run all CQs.
|
|
||||||
s.Run("", "")
|
|
||||||
// Shouldn't time out.
|
|
||||||
if err := wait(done, 100*time.Millisecond); err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
// This time it should timeout because ExecuteQuery should not get called again.
|
|
||||||
if err := wait(done, 100*time.Millisecond); err == nil {
|
|
||||||
t.Error("too many queries executed")
|
|
||||||
}
|
|
||||||
s.Close()
|
|
||||||
|
|
||||||
// Now test just one query.
|
|
||||||
expectCallCnt = 1
|
|
||||||
callCnt = 0
|
|
||||||
s.Open()
|
|
||||||
s.Run("db", "cq")
|
|
||||||
// Shouldn't time out.
|
|
||||||
if err := wait(done, 100*time.Millisecond); err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
// This time it should timeout because ExecuteQuery should not get called again.
|
|
||||||
if err := wait(done, 100*time.Millisecond); err == nil {
|
|
||||||
t.Error("too many queries executed")
|
|
||||||
}
|
|
||||||
s.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test service when not the cluster leader (CQs shouldn't run).
|
|
||||||
func TestService_NotLeader(t *testing.T) {
|
|
||||||
s := NewTestService(t)
|
|
||||||
// Set RunInterval high so we can test triggering with the RunCh below.
|
|
||||||
s.RunInterval = 10 * time.Second
|
|
||||||
s.MetaStore.(*MetaStore).Leader = false
|
|
||||||
|
|
||||||
done := make(chan struct{})
|
|
||||||
qe := s.QueryExecutor.(*QueryExecutor)
|
|
||||||
// Set a callback for ExecuteQuery. Shouldn't get called because we're not the leader.
|
|
||||||
qe.ExecuteQueryFn = func(query *influxql.Query, database string, chunkSize int) (<-chan *influxql.Result, error) {
|
|
||||||
done <- struct{}{}
|
|
||||||
return nil, unexpectedErr
|
|
||||||
}
|
|
||||||
|
|
||||||
s.Open()
|
|
||||||
// Trigger service to run CQs.
|
|
||||||
s.RunCh <- struct{}{}
|
|
||||||
// Expect timeout error because ExecuteQuery callback wasn't called.
|
|
||||||
if err := wait(done, 100*time.Millisecond); err == nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
s.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test service behavior when meta store fails to get databases.
|
|
||||||
func TestService_MetaStoreFailsToGetDatabases(t *testing.T) {
|
|
||||||
s := NewTestService(t)
|
|
||||||
// Set RunInterval high so we can test triggering with the RunCh below.
|
|
||||||
s.RunInterval = 10 * time.Second
|
|
||||||
s.MetaStore.(*MetaStore).Err = expectedErr
|
|
||||||
|
|
||||||
done := make(chan struct{})
|
|
||||||
qe := s.QueryExecutor.(*QueryExecutor)
|
|
||||||
// Set ExecuteQuery callback, which shouldn't get called because of meta store failure.
|
|
||||||
qe.ExecuteQueryFn = func(query *influxql.Query, database string, chunkSize int) (<-chan *influxql.Result, error) {
|
|
||||||
done <- struct{}{}
|
|
||||||
return nil, unexpectedErr
|
|
||||||
}
|
|
||||||
|
|
||||||
s.Open()
|
|
||||||
// Trigger service to run CQs.
|
|
||||||
s.RunCh <- struct{}{}
|
|
||||||
// Expect timeout error because ExecuteQuery callback wasn't called.
|
|
||||||
if err := wait(done, 100*time.Millisecond); err == nil {
|
|
||||||
t.Error(err)
|
|
||||||
}
|
|
||||||
s.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test ExecuteContinuousQuery with invalid queries.
|
|
||||||
func TestExecuteContinuousQuery_InvalidQueries(t *testing.T) {
|
|
||||||
s := NewTestService(t)
|
|
||||||
dbis, _ := s.MetaStore.Databases()
|
|
||||||
dbi := dbis[0]
|
|
||||||
cqi := dbi.ContinuousQueries[0]
|
|
||||||
|
|
||||||
cqi.Query = `this is not a query`
|
|
||||||
err := s.ExecuteContinuousQuery(&dbi, &cqi)
|
|
||||||
if err == nil {
|
|
||||||
t.Error("expected error but got nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Valid query but invalid continuous query.
|
|
||||||
cqi.Query = `SELECT * FROM cpu`
|
|
||||||
err = s.ExecuteContinuousQuery(&dbi, &cqi)
|
|
||||||
if err == nil {
|
|
||||||
t.Error("expected error but got nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Group by requires aggregate.
|
|
||||||
cqi.Query = `SELECT value INTO other_value FROM cpu WHERE time > now() - 1h GROUP BY time(1s)`
|
|
||||||
err = s.ExecuteContinuousQuery(&dbi, &cqi)
|
|
||||||
if err == nil {
|
|
||||||
t.Error("expected error but got nil")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test ExecuteContinuousQuery when QueryExecutor returns an error.
|
|
||||||
func TestExecuteContinuousQuery_QueryExecutor_Error(t *testing.T) {
|
|
||||||
s := NewTestService(t)
|
|
||||||
qe := s.QueryExecutor.(*QueryExecutor)
|
|
||||||
qe.Err = expectedErr
|
|
||||||
|
|
||||||
dbis, _ := s.MetaStore.Databases()
|
|
||||||
dbi := dbis[0]
|
|
||||||
cqi := dbi.ContinuousQueries[0]
|
|
||||||
|
|
||||||
err := s.ExecuteContinuousQuery(&dbi, &cqi)
|
|
||||||
if err != expectedErr {
|
|
||||||
t.Errorf("exp = %s, got = %v", expectedErr, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewTestService returns a new *Service with default mock object members.
|
|
||||||
func NewTestService(t *testing.T) *Service {
|
|
||||||
s := NewService(NewConfig())
|
|
||||||
ms := NewMetaStore(t)
|
|
||||||
s.MetaStore = ms
|
|
||||||
s.QueryExecutor = NewQueryExecutor(t)
|
|
||||||
s.PointsWriter = NewPointsWriter(t)
|
|
||||||
s.RunInterval = time.Millisecond
|
|
||||||
|
|
||||||
// Set Logger to write to dev/null so stdout isn't polluted.
|
|
||||||
if !testing.Verbose() {
|
|
||||||
s.Logger = log.New(ioutil.Discard, "", log.LstdFlags)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add a couple test databases and CQs.
|
|
||||||
ms.CreateDatabase("db", "rp")
|
|
||||||
ms.CreateContinuousQuery("db", "cq", `CREATE CONTINUOUS QUERY cq ON db BEGIN SELECT count(cpu) INTO cpu_count FROM cpu WHERE time > now() - 1h GROUP BY time(1s) END`)
|
|
||||||
ms.CreateDatabase("db2", "default")
|
|
||||||
ms.CreateContinuousQuery("db2", "cq2", `CREATE CONTINUOUS QUERY cq2 ON db2 BEGIN SELECT mean(value) INTO cpu_mean FROM cpu WHERE time > now() - 10m GROUP BY time(1m) END`)
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// MetaStore is a mock meta store.
type MetaStore struct {
	mu            sync.RWMutex // guards the fields below
	Leader        bool         // value reported by IsLeader
	DatabaseInfos []meta.DatabaseInfo
	Err           error // when set, every accessor fails with this error
	t             *testing.T
}
|
|
||||||
|
|
||||||
// NewMetaStore returns a *MetaStore that reports itself as the leader.
func NewMetaStore(t *testing.T) *MetaStore {
	return &MetaStore{
		Leader: true,
		t:      t,
	}
}
|
|
||||||
|
|
||||||
// IsLeader returns true if the node is the cluster leader.
func (ms *MetaStore) IsLeader() bool {
	ms.mu.RLock()
	defer ms.mu.RUnlock()
	return ms.Leader
}
|
|
||||||
|
|
||||||
// Databases returns a list of database info about each database in the cluster,
// along with the mock's injected error (if any).
func (ms *MetaStore) Databases() ([]meta.DatabaseInfo, error) {
	ms.mu.RLock()
	defer ms.mu.RUnlock()
	return ms.DatabaseInfos, ms.Err
}
|
|
||||||
|
|
||||||
// Database returns a single database by name.
func (ms *MetaStore) Database(name string) (*meta.DatabaseInfo, error) {
	ms.mu.RLock()
	defer ms.mu.RUnlock()
	return ms.database(name)
}
|
|
||||||
|
|
||||||
func (ms *MetaStore) database(name string) (*meta.DatabaseInfo, error) {
|
|
||||||
if ms.Err != nil {
|
|
||||||
return nil, ms.Err
|
|
||||||
}
|
|
||||||
for i := range ms.DatabaseInfos {
|
|
||||||
if ms.DatabaseInfos[i].Name == name {
|
|
||||||
return &ms.DatabaseInfos[i], nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("database not found: %s", name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateDatabase adds a new database to the meta store.
|
|
||||||
func (ms *MetaStore) CreateDatabase(name, defaultRetentionPolicy string) error {
|
|
||||||
ms.mu.Lock()
|
|
||||||
defer ms.mu.Unlock()
|
|
||||||
if ms.Err != nil {
|
|
||||||
return ms.Err
|
|
||||||
}
|
|
||||||
|
|
||||||
// See if the database already exists.
|
|
||||||
for _, dbi := range ms.DatabaseInfos {
|
|
||||||
if dbi.Name == name {
|
|
||||||
return fmt.Errorf("database already exists: %s", name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create database.
|
|
||||||
ms.DatabaseInfos = append(ms.DatabaseInfos, meta.DatabaseInfo{
|
|
||||||
Name: name,
|
|
||||||
DefaultRetentionPolicy: defaultRetentionPolicy,
|
|
||||||
})
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateContinuousQuery adds a CQ to the meta store.
// It returns an error when the mock Err is set, when the target database does
// not exist, or when a CQ of the same name is already registered on it.
func (ms *MetaStore) CreateContinuousQuery(database, name, query string) error {
	ms.mu.Lock()
	defer ms.mu.Unlock()
	if ms.Err != nil {
		return ms.Err
	}

	// database() returns a pointer into DatabaseInfos, so appending to
	// dbi.ContinuousQueries below mutates the stored entry in place.
	dbi, err := ms.database(database)
	if err != nil {
		return err
	} else if dbi == nil {
		return fmt.Errorf("database not found: %s", database)
	}

	// See if CQ already exists.
	for _, cqi := range dbi.ContinuousQueries {
		if cqi.Name == name {
			return fmt.Errorf("continuous query already exists: %s", name)
		}
	}

	// Create a new CQ and store it.
	dbi.ContinuousQueries = append(dbi.ContinuousQueries, meta.ContinuousQueryInfo{
		Name:  name,
		Query: query,
	})

	return nil
}
|
|
||||||
|
|
||||||
// QueryExecutor is a mock query executor.
type QueryExecutor struct {
	// ExecuteQueryFn, if set, is invoked at the start of ExecuteQuery; a
	// non-nil error aborts the call.
	ExecuteQueryFn func(query *influxql.Query, database string, chunkSize int) (<-chan *influxql.Result, error)
	// Results are streamed back to the caller, one per ResultInterval.
	Results        []*influxql.Result
	ResultInterval time.Duration
	// Err is returned immediately when ErrAfterResult == -1, otherwise it is
	// injected as an error result after ErrAfterResult-1 successful sends.
	Err            error
	ErrAfterResult int
	// StopRespondingAfter makes the result goroutine exit without closing the
	// channel, simulating a hung executor. -1 disables it.
	StopRespondingAfter int
	t                   *testing.T
}
|
|
||||||
|
|
||||||
// NewQueryExecutor returns a *QueryExecutor.
|
|
||||||
func NewQueryExecutor(t *testing.T) *QueryExecutor {
|
|
||||||
return &QueryExecutor{
|
|
||||||
ErrAfterResult: -1,
|
|
||||||
StopRespondingAfter: -1,
|
|
||||||
t: t,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExecuteQuery returns a channel that the caller can read query results from.
//
// Behavior is driven by the mock's knobs: ExecuteQueryFn runs first and can
// abort the call; Err with ErrAfterResult == -1 causes an immediate error;
// otherwise Results are streamed on the returned channel, with an error
// result injected at position ErrAfterResult-1 and a silent stop (channel
// left open) at position StopRespondingAfter.
func (qe *QueryExecutor) ExecuteQuery(query *influxql.Query, database string, chunkSize int) (<-chan *influxql.Result, error) {

	// If the test set a callback, call it.
	if qe.ExecuteQueryFn != nil {
		if _, err := qe.ExecuteQueryFn(query, database, chunkSize); err != nil {
			return nil, err
		}
	}

	// Are we supposed to error immediately?
	if qe.ErrAfterResult == -1 && qe.Err != nil {
		return nil, qe.Err
	}

	ch := make(chan *influxql.Result)

	// Start a go routine to send results and / or error.
	go func() {
		n := 0
		for i, r := range qe.Results {
			if i == qe.ErrAfterResult-1 {
				qe.t.Logf("ExecuteQuery(): ErrAfterResult %d", qe.ErrAfterResult-1)
				ch <- &influxql.Result{Err: qe.Err}
				close(ch)
				return
			} else if i == qe.StopRespondingAfter {
				// NOTE(review): the channel is deliberately left open here to
				// simulate a hung executor; readers must guard with a timeout.
				qe.t.Log("ExecuteQuery(): StopRespondingAfter")
				return
			}
			ch <- r
			n++
			time.Sleep(qe.ResultInterval)
		}
		qe.t.Logf("ExecuteQuery(): all (%d) results sent", n)
		close(ch)
	}()

	return ch, nil
}
|
|
||||||
|
|
||||||
// PointsWriter is a mock points writer.
type PointsWriter struct {
	// WritePointsFn, if set, is called first by WritePoints; a non-nil error
	// is returned to the caller.
	WritePointsFn func(p *cluster.WritePointsRequest) error
	// Err, if set, is returned by WritePoints after the callback runs.
	Err error
	// PointsPerSecond simulates a throughput-limited writer; WritePoints
	// sleeps for one point's worth of time at this rate.
	PointsPerSecond int
	t               *testing.T
}
|
|
||||||
|
|
||||||
// NewPointsWriter returns a new *PointsWriter.
|
|
||||||
func NewPointsWriter(t *testing.T) *PointsWriter {
|
|
||||||
return &PointsWriter{
|
|
||||||
PointsPerSecond: 25000,
|
|
||||||
t: t,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WritePoints mocks writing points.
|
|
||||||
func (pw *PointsWriter) WritePoints(p *cluster.WritePointsRequest) error {
|
|
||||||
// If the test set a callback, call it.
|
|
||||||
if pw.WritePointsFn != nil {
|
|
||||||
if err := pw.WritePointsFn(p); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if pw.Err != nil {
|
|
||||||
return pw.Err
|
|
||||||
}
|
|
||||||
ns := time.Duration((1 / pw.PointsPerSecond) * 1000000000)
|
|
||||||
time.Sleep(ns)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// genResult generates a dummy query result.
|
|
||||||
func genResult(rowCnt, valCnt int) *influxql.Result {
|
|
||||||
rows := make(influxql.Rows, 0, rowCnt)
|
|
||||||
now := time.Now()
|
|
||||||
for n := 0; n < rowCnt; n++ {
|
|
||||||
vals := make([][]interface{}, 0, valCnt)
|
|
||||||
for m := 0; m < valCnt; m++ {
|
|
||||||
vals = append(vals, []interface{}{now, float64(m)})
|
|
||||||
now.Add(time.Second)
|
|
||||||
}
|
|
||||||
row := &influxql.Row{
|
|
||||||
Name: "cpu",
|
|
||||||
Tags: map[string]string{"host": "server01"},
|
|
||||||
Columns: []string{"time", "value"},
|
|
||||||
Values: vals,
|
|
||||||
}
|
|
||||||
rows = append(rows, row)
|
|
||||||
}
|
|
||||||
return &influxql.Result{
|
|
||||||
Series: rows,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func wait(c chan struct{}, d time.Duration) (err error) {
|
|
||||||
select {
|
|
||||||
case <-c:
|
|
||||||
case <-time.After(d):
|
|
||||||
err = errors.New("timed out")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func waitInt(c chan int, d time.Duration) (i int, err error) {
|
|
||||||
select {
|
|
||||||
case i = <-c:
|
|
||||||
case <-time.After(d):
|
|
||||||
err = errors.New("timed out")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func check(err error) {
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
125
Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/README.md
generated
vendored
125
Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/README.md
generated
vendored
|
@ -1,125 +0,0 @@
|
||||||
## Introduction
|
|
||||||
|
|
||||||
The graphite plugin allows measurements to be saved using the graphite line protocol. By default, enabling the graphite plugin will allow you to collect metrics and store them using the metric name as the measurement. If you send a metric named `servers.localhost.cpu.loadavg.10`, it will store the full metric name as the measurement with no extracted tags.
|
|
||||||
|
|
||||||
While this default setup works, it is not the ideal way to store measurements in InfluxDB since it does not take advantage of tags. It also will not perform optimally with a large dataset sizes since queries will be forced to use regexes which is known to not scale well.
|
|
||||||
|
|
||||||
To extract tags from metrics, one or more templates must be configured to parse metrics into tags and measurements.
|
|
||||||
|
|
||||||
## Templates
|
|
||||||
|
|
||||||
Templates allow matching parts of a metric name to be used as tag names in the stored metric. They have a similar format to graphite metric names. The values in between the separators are used as the tag name. The location of the tag name that matches the same position as the graphite metric section is used as the value. If there is no value, the graphite portion is skipped.
|
|
||||||
|
|
||||||
The special value _measurement_ is used to define the measurement name. It can have a trailing `*` to indicate that the remainder of the metric should be used. If a _measurement_ is not specified, the full metric name is used.
|
|
||||||
|
|
||||||
### Basic Matching
|
|
||||||
|
|
||||||
`servers.localhost.cpu.loadavg.10`
|
|
||||||
* Template: `.host.resource.measurement*`
|
|
||||||
* Output: _measurement_ =`loadavg.10` _tags_ =`host=localhost resource=cpu`
|
|
||||||
|
|
||||||
### Multiple Measurement Matching
|
|
||||||
|
|
||||||
The _measurement_ can be specified multiple times in a template to provide more control over the measurement name. Multiple values
|
|
||||||
will be joined together using the _Separator_ config variable. By default, this value is `.`.
|
|
||||||
|
|
||||||
`servers.localhost.cpu.cpu0.user`
|
|
||||||
* Template: `.host.measurement.cpu.measurement`
|
|
||||||
* Output: _measurement_ = `cpu.user` _tags_ = `host=localhost cpu=cpu0`
|
|
||||||
|
|
||||||
Since '.' requires queries on measurements to be double-quoted, you may want to set this to `_` to simplify querying parsed metrics.
|
|
||||||
|
|
||||||
`servers.localhost.cpu.cpu0.user`
|
|
||||||
* Separator: `_`
|
|
||||||
* Template: `.host.measurement.cpu.measurement`
|
|
||||||
* Output: _measurement_ = `cpu_user` _tags_ = `host=localhost cpu=cpu0`
|
|
||||||
|
|
||||||
### Adding Tags
|
|
||||||
|
|
||||||
Additional tags can be added to a metric that don't exist on the received metric. You can add additional tags by specifying them after the pattern. Tags have the same format as the line protocol. Multiple tags are separated by commas.
|
|
||||||
|
|
||||||
`servers.localhost.cpu.loadavg.10`
|
|
||||||
* Template: `.host.resource.measurement* region=us-west,zone=1a`
|
|
||||||
* Output: _measurement_ = `loadavg.10` _tags_ = `host=localhost resource=cpu region=us-west zone=1a`
|
|
||||||
|
|
||||||
## Multiple Templates
|
|
||||||
|
|
||||||
One template may not match all metrics. For example, using multiple plugins with diamond will produce metrics in different formats. If you need to use multiple templates, you'll need to define a prefix filter that must match before the template can be applied.
|
|
||||||
|
|
||||||
### Filters
|
|
||||||
|
|
||||||
Filters have a similar format to templates but work more like wildcard expressions. When multiple filters would match a metric, the more specific one is chosen. Filters are configured by adding them before the template.
|
|
||||||
|
|
||||||
For example,
|
|
||||||
|
|
||||||
```
|
|
||||||
servers.localhost.cpu.loadavg.10
|
|
||||||
servers.host123.elasticsearch.cache_hits 100
|
|
||||||
servers.host456.mysql.tx_count 10
|
|
||||||
```
|
|
||||||
* `servers.*` would match all values
|
|
||||||
* `servers.*.mysql` would match `servers.host456.mysql.tx_count 10`
|
|
||||||
* `servers.localhost.*` would match `servers.localhost.cpu.loadavg`
|
|
||||||
|
|
||||||
## Default Templates
|
|
||||||
|
|
||||||
If no template filters are defined or you want to just have one basic template, you can define a default template. This template will apply to any metric that has not already matched a filter.
|
|
||||||
|
|
||||||
```
|
|
||||||
dev.http.requests.200
|
|
||||||
prod.myapp.errors.count
|
|
||||||
dev.db.queries.count
|
|
||||||
```
|
|
||||||
|
|
||||||
* `env.app.measurement*` would create
|
|
||||||
* _measurement_=`requests.200` _tags_=`env=dev,app=http`
|
|
||||||
* _measurement_= `errors.count` _tags_=`env=prod,app=myapp`
|
|
||||||
* _measurement_=`queries.count` _tags_=`env=dev,app=db`
|
|
||||||
|
|
||||||
## Global Tags
|
|
||||||
|
|
||||||
If you need to add the same set of tags to all metrics, you can define them globally at the plugin level and not within each template description.
|
|
||||||
|
|
||||||
## Minimal Config
|
|
||||||
```
|
|
||||||
[[graphite]]
|
|
||||||
enabled = true
|
|
||||||
# bind-address = ":2003"
|
|
||||||
# protocol = "tcp"
|
|
||||||
# consistency-level = "one"
|
|
||||||
|
|
||||||
### If matching multiple measurement files, this string will be used to join the matched values.
|
|
||||||
# separator = "."
|
|
||||||
|
|
||||||
### Default tags that will be added to all metrics. These can be overridden at the template level
|
|
||||||
### or by tags extracted from metric
|
|
||||||
# tags = ["region=us-east", "zone=1c"]
|
|
||||||
|
|
||||||
### Each template line requires a template pattern. It can have an optional
|
|
||||||
### filter before the template and separated by spaces. It can also have optional extra
|
|
||||||
### tags following the template. Multiple tags should be separated by commas and no spaces
|
|
||||||
### similar to the line protocol format. There can be only one default template.
|
|
||||||
# templates = [
|
|
||||||
# "*.app env.service.resource.measurement",
|
|
||||||
# # Default template
|
|
||||||
# "server.*",
|
|
||||||
#]
|
|
||||||
```
|
|
||||||
|
|
||||||
## Customized Config
|
|
||||||
```
|
|
||||||
[[graphite]]
|
|
||||||
enabled = true
|
|
||||||
separator = "_"
|
|
||||||
tags = ["region=us-east", "zone=1c"]
|
|
||||||
templates = [
|
|
||||||
# filter + template
|
|
||||||
"*.app env.service.resource.measurement",
|
|
||||||
|
|
||||||
# filter + template + extra tag
|
|
||||||
"stats.* .host.measurement* region=us-west,agent=sensu",
|
|
||||||
|
|
||||||
# default template. Ignore the first graphite component "servers"
|
|
||||||
".measurement*",
|
|
||||||
]
|
|
||||||
```
|
|
221
Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/config.go
generated
vendored
221
Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/config.go
generated
vendored
|
@ -1,221 +0,0 @@
|
||||||
package graphite
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/toml"
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// DefaultBindAddress is the default binding interface if none is specified.
	DefaultBindAddress = ":2003"

	// DefaultDatabase is the default database if none is specified.
	DefaultDatabase = "graphite"

	// DefaultProtocol is the default IP protocol used by the Graphite input.
	DefaultProtocol = "tcp"

	// DefaultConsistencyLevel is the default write consistency for the Graphite input.
	DefaultConsistencyLevel = "one"

	// DefaultSeparator is the default join character to use when joining multiple
	// measurement parts in a template.
	DefaultSeparator = "."

	// DefaultBatchSize is the default Graphite batch size.
	DefaultBatchSize = 1000

	// DefaultBatchTimeout is the default Graphite batch timeout.
	DefaultBatchTimeout = time.Second
)
|
|
||||||
|
|
||||||
// Config represents the configuration for Graphite endpoints.
type Config struct {
	BindAddress      string        `toml:"bind-address"`      // listen address, e.g. ":2003"
	Database         string        `toml:"database"`          // target database for received metrics
	Enabled          bool          `toml:"enabled"`
	Protocol         string        `toml:"protocol"`          // IP protocol, e.g. "tcp"
	BatchSize        int           `toml:"batch-size"`
	BatchTimeout     toml.Duration `toml:"batch-timeout"`
	ConsistencyLevel string        `toml:"consistency-level"`
	Templates        []string      `toml:"templates"`         // "[filter] <template> [tags]" lines
	Tags             []string      `toml:"tags"`              // global "key=value" default tags
	Separator        string        `toml:"separator"`         // joiner for multiple measurement parts
}
|
|
||||||
|
|
||||||
// NewConfig returns a new Config with defaults.
|
|
||||||
func NewConfig() Config {
|
|
||||||
return Config{
|
|
||||||
BindAddress: DefaultBindAddress,
|
|
||||||
Database: DefaultDatabase,
|
|
||||||
Protocol: DefaultProtocol,
|
|
||||||
BatchSize: DefaultBatchSize,
|
|
||||||
BatchTimeout: toml.Duration(DefaultBatchTimeout),
|
|
||||||
ConsistencyLevel: DefaultConsistencyLevel,
|
|
||||||
Separator: DefaultSeparator,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithDefaults takes the given config and returns a new config with any required
|
|
||||||
// default values set.
|
|
||||||
func (c *Config) WithDefaults() *Config {
|
|
||||||
d := *c
|
|
||||||
if d.BindAddress == "" {
|
|
||||||
d.BindAddress = DefaultBindAddress
|
|
||||||
}
|
|
||||||
if d.Database == "" {
|
|
||||||
d.Database = DefaultDatabase
|
|
||||||
}
|
|
||||||
if d.Protocol == "" {
|
|
||||||
d.Protocol = DefaultProtocol
|
|
||||||
}
|
|
||||||
if d.ConsistencyLevel == "" {
|
|
||||||
d.ConsistencyLevel = DefaultConsistencyLevel
|
|
||||||
}
|
|
||||||
if d.Separator == "" {
|
|
||||||
d.Separator = DefaultSeparator
|
|
||||||
}
|
|
||||||
return &d
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Config) DefaultTags() tsdb.Tags {
|
|
||||||
tags := tsdb.Tags{}
|
|
||||||
for _, t := range c.Tags {
|
|
||||||
parts := strings.Split(t, "=")
|
|
||||||
tags[parts[0]] = parts[1]
|
|
||||||
}
|
|
||||||
return tags
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Config) Validate() error {
|
|
||||||
if err := c.validateTemplates(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := c.validateTags(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateTemplates checks every template line for syntactic validity:
// non-empty, at most "filter template tags" fields, a measurement section in
// the template, a well-formed filter, well-formed tags, and no duplicate
// filters across lines.
func (c *Config) validateTemplates() error {
	// map to keep track of filters we see
	filters := map[string]struct{}{}

	for i, t := range c.Templates {
		parts := strings.Fields(t)
		// Ensure template string is non-empty
		if len(parts) == 0 {
			return fmt.Errorf("missing template at position: %d", i)
		}
		// NOTE(review): strings.Fields never yields [""], so this second
		// check looks unreachable — confirm before removing.
		if len(parts) == 1 && parts[0] == "" {
			return fmt.Errorf("missing template at position: %d", i)
		}

		// A line has at most three fields: filter, template, tags.
		if len(parts) > 3 {
			return fmt.Errorf("invalid template format: '%s'", t)
		}

		template := t
		filter := ""
		tags := ""
		if len(parts) >= 2 {
			// We could have <filter> <template> or <template> <tags>. Equals is only allowed in
			// tags section.
			if strings.Contains(parts[1], "=") {
				template = parts[0]
				tags = parts[1]
			} else {
				filter = parts[0]
				template = parts[1]
			}
		}

		if len(parts) == 3 {
			tags = parts[2]
		}

		// Validate the template has one and only one measurement
		if err := c.validateTemplate(template); err != nil {
			return err
		}

		// Prevent duplicate filters in the config
		if _, ok := filters[filter]; ok {
			return fmt.Errorf("duplicate filter '%s' found at position: %d", filter, i)
		}
		filters[filter] = struct{}{}

		if filter != "" {
			// Validate filter expression is valid
			if err := c.validateFilter(filter); err != nil {
				return err
			}
		}

		if tags != "" {
			// Validate tags
			for _, tagStr := range strings.Split(tags, ",") {
				if err := c.validateTag(tagStr); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
|
|
||||||
|
|
||||||
func (c *Config) validateTags() error {
|
|
||||||
for _, t := range c.Tags {
|
|
||||||
if err := c.validateTag(t); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Config) validateTemplate(template string) error {
|
|
||||||
hasMeasurement := false
|
|
||||||
for _, p := range strings.Split(template, ".") {
|
|
||||||
if p == "measurement" || p == "measurement*" {
|
|
||||||
hasMeasurement = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !hasMeasurement {
|
|
||||||
return fmt.Errorf("no measurement in template `%s`", template)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Config) validateFilter(filter string) error {
|
|
||||||
for _, p := range strings.Split(filter, ".") {
|
|
||||||
if p == "" {
|
|
||||||
return fmt.Errorf("filter contains blank section: %s", filter)
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.Contains(p, "*") && p != "*" {
|
|
||||||
return fmt.Errorf("invalid filter wildcard section: %s", filter)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Config) validateTag(keyValue string) error {
|
|
||||||
parts := strings.Split(keyValue, "=")
|
|
||||||
if len(parts) != 2 {
|
|
||||||
return fmt.Errorf("invalid template tags: '%s'", keyValue)
|
|
||||||
}
|
|
||||||
|
|
||||||
if parts[0] == "" || parts[1] == "" {
|
|
||||||
return fmt.Errorf("invalid template tags: %s'", keyValue)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
164
Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/config_test.go
generated
vendored
164
Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/config_test.go
generated
vendored
|
@ -1,164 +0,0 @@
|
||||||
package graphite_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/BurntSushi/toml"
|
|
||||||
"github.com/influxdb/influxdb/services/graphite"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestConfig_Parse(t *testing.T) {
|
|
||||||
// Parse configuration.
|
|
||||||
var c graphite.Config
|
|
||||||
if _, err := toml.Decode(`
|
|
||||||
bind-address = ":8080"
|
|
||||||
database = "mydb"
|
|
||||||
enabled = true
|
|
||||||
protocol = "tcp"
|
|
||||||
batch-size=100
|
|
||||||
batch-timeout="1s"
|
|
||||||
consistency-level="one"
|
|
||||||
templates=["servers.* .host.measurement*"]
|
|
||||||
tags=["region=us-east"]
|
|
||||||
`, &c); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate configuration.
|
|
||||||
if c.BindAddress != ":8080" {
|
|
||||||
t.Fatalf("unexpected bind address: %s", c.BindAddress)
|
|
||||||
} else if c.Database != "mydb" {
|
|
||||||
t.Fatalf("unexpected database selected: %s", c.Database)
|
|
||||||
} else if c.Enabled != true {
|
|
||||||
t.Fatalf("unexpected graphite enabled: %v", c.Enabled)
|
|
||||||
} else if c.Protocol != "tcp" {
|
|
||||||
t.Fatalf("unexpected graphite protocol: %s", c.Protocol)
|
|
||||||
} else if c.BatchSize != 100 {
|
|
||||||
t.Fatalf("unexpected graphite batch size: %d", c.BatchSize)
|
|
||||||
} else if time.Duration(c.BatchTimeout) != time.Second {
|
|
||||||
t.Fatalf("unexpected graphite batch timeout: %v", c.BatchTimeout)
|
|
||||||
} else if c.ConsistencyLevel != "one" {
|
|
||||||
t.Fatalf("unexpected graphite consistency setting: %s", c.ConsistencyLevel)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(c.Templates) != 1 && c.Templates[0] != "servers.* .host.measurement*" {
|
|
||||||
t.Fatalf("unexpected graphite templates setting: %v", c.Templates)
|
|
||||||
}
|
|
||||||
if len(c.Tags) != 1 && c.Tags[0] != "regsion=us-east" {
|
|
||||||
t.Fatalf("unexpected graphite templates setting: %v", c.Tags)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfigValidateEmptyTemplate(t *testing.T) {
|
|
||||||
c := graphite.NewConfig()
|
|
||||||
c.Templates = []string{""}
|
|
||||||
if err := c.Validate(); err == nil {
|
|
||||||
t.Errorf("config validate expected error. got nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
c.Templates = []string{" "}
|
|
||||||
if err := c.Validate(); err == nil {
|
|
||||||
t.Errorf("config validate expected error. got nil")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfigValidateTooManyField(t *testing.T) {
|
|
||||||
c := graphite.NewConfig()
|
|
||||||
c.Templates = []string{"a measurement b c"}
|
|
||||||
if err := c.Validate(); err == nil {
|
|
||||||
t.Errorf("config validate expected error. got nil")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfigValidateTemplatePatterns(t *testing.T) {
|
|
||||||
c := graphite.NewConfig()
|
|
||||||
c.Templates = []string{"*measurement"}
|
|
||||||
if err := c.Validate(); err == nil {
|
|
||||||
t.Errorf("config validate expected error. got nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
c.Templates = []string{".host.region"}
|
|
||||||
if err := c.Validate(); err == nil {
|
|
||||||
t.Errorf("config validate expected error. got nil")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfigValidateFilter(t *testing.T) {
|
|
||||||
c := graphite.NewConfig()
|
|
||||||
c.Templates = []string{".server measurement*"}
|
|
||||||
if err := c.Validate(); err == nil {
|
|
||||||
t.Errorf("config validate expected error. got nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
c.Templates = []string{". .server measurement*"}
|
|
||||||
if err := c.Validate(); err == nil {
|
|
||||||
t.Errorf("config validate expected error. got nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
c.Templates = []string{"server* measurement*"}
|
|
||||||
if err := c.Validate(); err == nil {
|
|
||||||
t.Errorf("config validate expected error. got nil")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfigValidateTemplateTags(t *testing.T) {
|
|
||||||
c := graphite.NewConfig()
|
|
||||||
c.Templates = []string{"*.server measurement* foo"}
|
|
||||||
if err := c.Validate(); err == nil {
|
|
||||||
t.Errorf("config validate expected error. got nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
c.Templates = []string{"*.server measurement* foo=bar="}
|
|
||||||
if err := c.Validate(); err == nil {
|
|
||||||
t.Errorf("config validate expected error. got nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
c.Templates = []string{"*.server measurement* foo=bar,"}
|
|
||||||
if err := c.Validate(); err == nil {
|
|
||||||
t.Errorf("config validate expected error. got nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
c.Templates = []string{"*.server measurement* ="}
|
|
||||||
if err := c.Validate(); err == nil {
|
|
||||||
t.Errorf("config validate expected error. got nil")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfigValidateDefaultTags(t *testing.T) {
|
|
||||||
c := graphite.NewConfig()
|
|
||||||
c.Tags = []string{"foo"}
|
|
||||||
if err := c.Validate(); err == nil {
|
|
||||||
t.Errorf("config validate expected error. got nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
c.Tags = []string{"foo=bar="}
|
|
||||||
if err := c.Validate(); err == nil {
|
|
||||||
t.Errorf("config validate expected error. got nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
c.Tags = []string{"foo=bar", ""}
|
|
||||||
if err := c.Validate(); err == nil {
|
|
||||||
t.Errorf("config validate expected error. got nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
c.Tags = []string{"="}
|
|
||||||
if err := c.Validate(); err == nil {
|
|
||||||
t.Errorf("config validate expected error. got nil")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfigValidateFilterDuplicates(t *testing.T) {
|
|
||||||
c := graphite.NewConfig()
|
|
||||||
c.Templates = []string{"foo measurement*", "foo .host.measurement"}
|
|
||||||
if err := c.Validate(); err == nil {
|
|
||||||
t.Errorf("config validate expected error. got nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
// duplicate default templates
|
|
||||||
c.Templates = []string{"measurement*", ".host.measurement"}
|
|
||||||
if err := c.Validate(); err == nil {
|
|
||||||
t.Errorf("config validate expected error. got nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
342
Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/parser.go
generated
vendored
342
Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/parser.go
generated
vendored
|
@ -1,342 +0,0 @@
|
||||||
package graphite
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
// defaultTemplate is the fallback "measurement*" template used when no
// configured template matches a metric name.
var defaultTemplate *template

func init() {
	var err error
	defaultTemplate, err = NewTemplate("measurement*", nil, DefaultSeparator)
	if err != nil {
		// The pattern is a compile-time constant; failure here is a
		// programmer bug, not a runtime condition.
		panic(err)
	}
}
|
|
||||||
|
|
||||||
// Parser encapsulates a Graphite Parser.
type Parser struct {
	matcher *matcher  // maps metric names to the best-matching template
	tags    tsdb.Tags // default tags applied when a point doesn't set them
}
|
|
||||||
|
|
||||||
// Options are configurable values that can be provided to a Parser
type Options struct {
	Separator   string    // joiner for multi-part measurement names
	Templates   []string  // "[filter] <template> [tag1=v1,...]" lines
	DefaultTags tsdb.Tags // tags applied to every parsed point unless overridden
}
|
|
||||||
|
|
||||||
// NewParserWithOptions returns a graphite parser using the given options.
// Each template line is split into an optional filter, the template pattern,
// and optional per-template default tags; the matcher falls back to
// defaultTemplate when nothing matches.
func NewParserWithOptions(options Options) (*Parser, error) {

	matcher := newMatcher()
	matcher.AddDefaultTemplate(defaultTemplate)

	for _, pattern := range options.Templates {

		template := pattern
		filter := ""
		// Format is [filter] <template> [tag1=value1,tag2=value2]
		parts := strings.Fields(pattern)
		if len(parts) >= 2 {
			if strings.Contains(parts[1], "=") {
				template = parts[0]
			} else {
				filter = parts[0]
				template = parts[1]
			}
		}

		// Parse out the default tags specific to this template
		tags := tsdb.Tags{}
		if strings.Contains(parts[len(parts)-1], "=") {
			tagStrs := strings.Split(parts[len(parts)-1], ",")
			for _, kv := range tagStrs {
				parts := strings.Split(kv, "=")
				// NOTE(review): a malformed pair such as "a=1,b" makes
				// parts[1] index out of range, and an empty pattern makes
				// parts[len(parts)-1] above panic — this assumes
				// Config.Validate ran first; confirm callers guarantee that.
				tags[parts[0]] = parts[1]
			}
		}

		tmpl, err := NewTemplate(template, tags, options.Separator)
		if err != nil {
			return nil, err
		}
		matcher.Add(filter, tmpl)
	}
	return &Parser{matcher: matcher, tags: options.DefaultTags}, nil
}
|
|
||||||
|
|
||||||
// NewParser returns a GraphiteParser instance.
|
|
||||||
func NewParser(templates []string, defaultTags tsdb.Tags) (*Parser, error) {
|
|
||||||
return NewParserWithOptions(
|
|
||||||
Options{
|
|
||||||
Templates: templates,
|
|
||||||
DefaultTags: defaultTags,
|
|
||||||
Separator: DefaultSeparator,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse performs Graphite parsing of a single line.
// A line is "<name> <value> [<timestamp>]". The name is decoded into a
// measurement and tags via the best-matching template, the value becomes the
// point's "value" field, and a missing or -1 timestamp means "now".
func (p *Parser) Parse(line string) (tsdb.Point, error) {
	// Break into 3 fields (name, value, timestamp).
	fields := strings.Fields(line)
	if len(fields) != 2 && len(fields) != 3 {
		return nil, fmt.Errorf("received %q which doesn't have required fields", line)
	}

	// decode the name and tags
	matcher := p.matcher.Match(fields[0])
	measurement, tags := matcher.Apply(fields[0])

	// Could not extract measurement, use the raw value
	if measurement == "" {
		measurement = fields[0]
	}

	// Parse value.
	v, err := strconv.ParseFloat(fields[1], 64)
	if err != nil {
		return nil, fmt.Errorf(`field "%s" value: %s`, fields[0], err)
	}

	fieldValues := map[string]interface{}{"value": v}

	// If no 3rd field, use now as timestamp
	timestamp := time.Now().UTC()

	if len(fields) == 3 {
		// Parse timestamp.
		unixTime, err := strconv.ParseFloat(fields[2], 64)
		if err != nil {
			return nil, fmt.Errorf(`field "%s" time: %s`, fields[0], err)
		}

		// -1 is a special value that gets converted to current UTC time
		// See https://github.com/graphite-project/carbon/issues/54
		if unixTime != float64(-1) {
			// Check if we have fractional seconds
			timestamp = time.Unix(int64(unixTime), int64((unixTime-math.Floor(unixTime))*float64(time.Second)))
		}
	}

	// Set the default tags on the point if they are not already set
	for k, v := range p.tags {
		if _, ok := tags[k]; !ok {
			tags[k] = v
		}
	}
	point := tsdb.NewPoint(measurement, tags, fieldValues, timestamp)

	return point, nil
}
|
|
||||||
|
|
||||||
// template represents a pattern and tags to map a graphite metric string to a influxdb Point.
type template struct {
	tags              []string  // "."-separated pattern parts, e.g. "host", "measurement", or "" (skip)
	defaultTags       tsdb.Tags // tags applied to every metric matched by this template
	greedyMeasurement bool      // true when the pattern contains "measurement*"
	separator         string    // joins multi-part measurement names back together
}
|
|
||||||
|
|
||||||
func NewTemplate(pattern string, defaultTags tsdb.Tags, separator string) (*template, error) {
|
|
||||||
tags := strings.Split(pattern, ".")
|
|
||||||
hasMeasurement := false
|
|
||||||
template := &template{tags: tags, defaultTags: defaultTags, separator: separator}
|
|
||||||
|
|
||||||
for _, tag := range tags {
|
|
||||||
if strings.HasPrefix(tag, "measurement") {
|
|
||||||
hasMeasurement = true
|
|
||||||
}
|
|
||||||
if tag == "measurement*" {
|
|
||||||
template.greedyMeasurement = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !hasMeasurement {
|
|
||||||
return nil, fmt.Errorf("no measurement specified for template. %q", pattern)
|
|
||||||
}
|
|
||||||
|
|
||||||
return template, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Apply extracts the template fields form the given line and returns the measurement
|
|
||||||
// name and tags
|
|
||||||
func (t *template) Apply(line string) (string, map[string]string) {
|
|
||||||
fields := strings.Split(line, ".")
|
|
||||||
var (
|
|
||||||
measurement []string
|
|
||||||
tags = make(map[string]string)
|
|
||||||
)
|
|
||||||
|
|
||||||
// Set any default tags
|
|
||||||
for k, v := range t.defaultTags {
|
|
||||||
tags[k] = v
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, tag := range t.tags {
|
|
||||||
if i >= len(fields) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if tag == "measurement" {
|
|
||||||
measurement = append(measurement, fields[i])
|
|
||||||
} else if tag == "measurement*" {
|
|
||||||
measurement = append(measurement, fields[i:]...)
|
|
||||||
break
|
|
||||||
} else if tag != "" {
|
|
||||||
tags[tag] = fields[i]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return strings.Join(measurement, t.separator), tags
|
|
||||||
}
|
|
||||||
|
|
||||||
// matcher determines which template should be applied to a given metric
// based on a filter tree.
type matcher struct {
	root            *node     // root of the filter tree, keyed on "."-separated components
	defaultTemplate *template // fallback used when no filter in the tree matches
}
|
|
||||||
|
|
||||||
func newMatcher() *matcher {
|
|
||||||
return &matcher{
|
|
||||||
root: &node{},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add inserts the template in the filter tree based the given filter
|
|
||||||
func (m *matcher) Add(filter string, template *template) {
|
|
||||||
if filter == "" {
|
|
||||||
m.AddDefaultTemplate(template)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
m.root.Insert(filter, template)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *matcher) AddDefaultTemplate(template *template) {
|
|
||||||
m.defaultTemplate = template
|
|
||||||
}
|
|
||||||
|
|
||||||
// Match returns the template that matches the given graphite line
|
|
||||||
func (m *matcher) Match(line string) *template {
|
|
||||||
tmpl := m.root.Search(line)
|
|
||||||
if tmpl != nil {
|
|
||||||
return tmpl
|
|
||||||
}
|
|
||||||
|
|
||||||
return m.defaultTemplate
|
|
||||||
}
|
|
||||||
|
|
||||||
// node is an item in a sorted k-ary tree. Each child is sorted by its value.
// The special value of "*", is always last.
type node struct {
	value    string    // one "."-separated filter component, or the wildcard "*"
	children nodes     // sorted child nodes (wildcard last; see nodes.Less)
	template *template // template registered at this exact filter path, if any
}
|
|
||||||
|
|
||||||
func (n *node) insert(values []string, template *template) {
|
|
||||||
// Add the end, set the template
|
|
||||||
if len(values) == 0 {
|
|
||||||
n.template = template
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// See if the the current element already exists in the tree. If so, insert the
|
|
||||||
// into that sub-tree
|
|
||||||
for _, v := range n.children {
|
|
||||||
if v.value == values[0] {
|
|
||||||
v.insert(values[1:], template)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// New element, add it to the tree and sort the children
|
|
||||||
newNode := &node{value: values[0]}
|
|
||||||
n.children = append(n.children, newNode)
|
|
||||||
sort.Sort(&n.children)
|
|
||||||
|
|
||||||
// Now insert the rest of the tree into the new element
|
|
||||||
newNode.insert(values[1:], template)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Insert inserts the given string template into the tree. The filter string is separated
|
|
||||||
// on "." and each part is used as the path in the tree.
|
|
||||||
func (n *node) Insert(filter string, template *template) {
|
|
||||||
n.insert(strings.Split(filter, "."), template)
|
|
||||||
}
|
|
||||||
|
|
||||||
// search descends the filter tree along lineParts, preferring an exact
// child match over the wildcard child, and returns the template found at
// the deepest matching node (which may be nil).
func (n *node) search(lineParts []string) *template {
	// Nothing to search
	if len(lineParts) == 0 || len(n.children) == 0 {
		return n.template
	}

	// If last element is a wildcard, don't include in this search since it's sorted
	// to the end but lexicographically it would not always be and sort.Search assumes
	// the slice is sorted.
	length := len(n.children)
	if n.children[length-1].value == "*" {
		length -= 1
	}

	// Find the index of child with an exact match
	i := sort.Search(length, func(i int) bool {
		return n.children[i].value >= lineParts[0]
	})

	// Found an exact match, so search that child sub-tree
	if i < len(n.children) && n.children[i].value == lineParts[0] {
		return n.children[i].search(lineParts[1:])
	}
	// Not an exact match, see if we have a wildcard child to search
	if n.children[len(n.children)-1].value == "*" {
		return n.children[len(n.children)-1].search(lineParts[1:])
	}
	return n.template
}
|
|
||||||
|
|
||||||
func (n *node) Search(line string) *template {
|
|
||||||
return n.search(strings.Split(line, "."))
|
|
||||||
}
|
|
||||||
|
|
||||||
// nodes is a sortable slice of tree nodes, ordered by value with the
// wildcard "*" always sorting last (see Less).
type nodes []*node
|
|
||||||
|
|
||||||
// Less returns a boolean indicating whether the filter at position j
|
|
||||||
// is less than the filter at position k. Filters are order by string
|
|
||||||
// comparison of each component parts. A wildcard value "*" is never
|
|
||||||
// less than a non-wildcard value.
|
|
||||||
//
|
|
||||||
// For example, the filters:
|
|
||||||
// "*.*"
|
|
||||||
// "servers.*"
|
|
||||||
// "servers.localhost"
|
|
||||||
// "*.localhost"
|
|
||||||
//
|
|
||||||
// Would be sorted as:
|
|
||||||
// "servers.localhost"
|
|
||||||
// "servers.*"
|
|
||||||
// "*.localhost"
|
|
||||||
// "*.*"
|
|
||||||
func (n *nodes) Less(j, k int) bool {
|
|
||||||
if (*n)[j].value == "*" && (*n)[k].value != "*" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if (*n)[j].value != "*" && (*n)[k].value == "*" {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return (*n)[j].value < (*n)[k].value
|
|
||||||
}
|
|
||||||
|
|
||||||
// Swap exchanges the nodes at positions i and j.
func (n *nodes) Swap(i, j int) { (*n)[i], (*n)[j] = (*n)[j], (*n)[i] }
|
|
||||||
// Len returns the number of nodes in the slice.
func (n *nodes) Len() int { return len(*n) }
|
|
548
Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/parser_test.go
generated
vendored
548
Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/parser_test.go
generated
vendored
|
@ -1,548 +0,0 @@
|
||||||
package graphite_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"strconv"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/services/graphite"
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
// BenchmarkParse measures Parser.Parse throughput against a parser
// configured with a realistic mix of wildcard and exact filters.
func BenchmarkParse(b *testing.B) {
	p, err := graphite.NewParser([]string{
		"*.* .wrong.measurement*",
		"servers.* .host.measurement*",
		"servers.localhost .host.measurement*",
		"*.localhost .host.measurement*",
		"*.*.cpu .host.measurement*",
		"a.b.c .host.measurement*",
		"influxd.*.foo .host.measurement*",
		"prod.*.mem .host.measurement*",
	}, nil)

	if err != nil {
		b.Fatalf("unexpected error creating parser, got %v", err)
	}

	for i := 0; i < b.N; i++ {
		p.Parse("servers.localhost.cpu.load 11 1435077219")
	}
}
|
|
||||||
|
|
||||||
// TestTemplateApply runs a table of template patterns against input metric
// names, checking the extracted measurement, tags, and expected errors.
func TestTemplateApply(t *testing.T) {
	var tests = []struct {
		test        string
		input       string
		template    string
		measurement string
		tags        map[string]string
		err         string
	}{
		{
			test:        "metric only",
			input:       "cpu",
			template:    "measurement",
			measurement: "cpu",
		},
		{
			test:        "metric with single series",
			input:       "cpu.server01",
			template:    "measurement.hostname",
			measurement: "cpu",
			tags:        map[string]string{"hostname": "server01"},
		},
		{
			test:        "metric with multiple series",
			input:       "cpu.us-west.server01",
			template:    "measurement.region.hostname",
			measurement: "cpu",
			tags:        map[string]string{"hostname": "server01", "region": "us-west"},
		},
		{
			test: "no metric",
			tags: make(map[string]string),
			err:  `no measurement specified for template. ""`,
		},
		{
			test:        "ignore unnamed",
			input:       "foo.cpu",
			template:    "measurement",
			measurement: "foo",
			tags:        make(map[string]string),
		},
		{
			test:        "name shorter than template",
			input:       "foo",
			template:    "measurement.A.B.C",
			measurement: "foo",
			tags:        make(map[string]string),
		},
		{
			test:        "wildcard measurement at end",
			input:       "prod.us-west.server01.cpu.load",
			template:    "env.zone.host.measurement*",
			measurement: "cpu.load",
			tags:        map[string]string{"env": "prod", "zone": "us-west", "host": "server01"},
		},
		{
			test:        "skip fields",
			input:       "ignore.us-west.ignore-this-too.cpu.load",
			template:    ".zone..measurement*",
			measurement: "cpu.load",
			tags:        map[string]string{"zone": "us-west"},
		},
	}

	for _, test := range tests {
		tmpl, err := graphite.NewTemplate(test.template, nil, graphite.DefaultSeparator)
		if errstr(err) != test.err {
			t.Fatalf("err does not match. expected %v, got %v", test.err, err)
		}
		if err != nil {
			// If we erred out, it was intended and the following tests won't work
			continue
		}

		measurement, tags := tmpl.Apply(test.input)
		if measurement != test.measurement {
			t.Fatalf("name parse failer. expected %v, got %v", test.measurement, measurement)
		}
		if len(tags) != len(test.tags) {
			t.Fatalf("unexpected number of tags. expected %v, got %v", test.tags, tags)
		}
		for k, v := range test.tags {
			if tags[k] != v {
				t.Fatalf("unexpected tag value for tags[%s]. expected %q, got %q", k, v, tags[k])
			}
		}
	}
}
|
|
||||||
|
|
||||||
// TestParseMissingMeasurement verifies that constructing a parser from a
// template with no "measurement" component fails.
func TestParseMissingMeasurement(t *testing.T) {
	_, err := graphite.NewParser([]string{"a.b.c"}, nil)
	if err == nil {
		t.Fatalf("expected error creating parser, got nil")
	}
}
|
|
||||||
|
|
||||||
func TestParse(t *testing.T) {
|
|
||||||
testTime := time.Now().Round(time.Second)
|
|
||||||
epochTime := testTime.Unix()
|
|
||||||
strTime := strconv.FormatInt(epochTime, 10)
|
|
||||||
|
|
||||||
var tests = []struct {
|
|
||||||
test string
|
|
||||||
input string
|
|
||||||
measurement string
|
|
||||||
tags map[string]string
|
|
||||||
value float64
|
|
||||||
time time.Time
|
|
||||||
template string
|
|
||||||
err string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
test: "normal case",
|
|
||||||
input: `cpu.foo.bar 50 ` + strTime,
|
|
||||||
template: "measurement.foo.bar",
|
|
||||||
measurement: "cpu",
|
|
||||||
tags: map[string]string{
|
|
||||||
"foo": "foo",
|
|
||||||
"bar": "bar",
|
|
||||||
},
|
|
||||||
value: 50,
|
|
||||||
time: testTime,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
test: "metric only with float value",
|
|
||||||
input: `cpu 50.554 ` + strTime,
|
|
||||||
measurement: "cpu",
|
|
||||||
template: "measurement",
|
|
||||||
value: 50.554,
|
|
||||||
time: testTime,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
test: "missing metric",
|
|
||||||
input: `1419972457825`,
|
|
||||||
template: "measurement",
|
|
||||||
err: `received "1419972457825" which doesn't have required fields`,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
test: "should error parsing invalid float",
|
|
||||||
input: `cpu 50.554z 1419972457825`,
|
|
||||||
template: "measurement",
|
|
||||||
err: `field "cpu" value: strconv.ParseFloat: parsing "50.554z": invalid syntax`,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
test: "should error parsing invalid int",
|
|
||||||
input: `cpu 50z 1419972457825`,
|
|
||||||
template: "measurement",
|
|
||||||
err: `field "cpu" value: strconv.ParseFloat: parsing "50z": invalid syntax`,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
test: "should error parsing invalid time",
|
|
||||||
input: `cpu 50.554 14199724z57825`,
|
|
||||||
template: "measurement",
|
|
||||||
err: `field "cpu" time: strconv.ParseFloat: parsing "14199724z57825": invalid syntax`,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, test := range tests {
|
|
||||||
p, err := graphite.NewParser([]string{test.template}, nil)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error creating graphite parser: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
point, err := p.Parse(test.input)
|
|
||||||
if errstr(err) != test.err {
|
|
||||||
t.Fatalf("err does not match. expected %v, got %v", test.err, err)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
// If we erred out,it was intended and the following tests won't work
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if point.Name() != test.measurement {
|
|
||||||
t.Fatalf("name parse failer. expected %v, got %v", test.measurement, point.Name())
|
|
||||||
}
|
|
||||||
if len(point.Tags()) != len(test.tags) {
|
|
||||||
t.Fatalf("tags len mismatch. expected %d, got %d", len(test.tags), len(point.Tags()))
|
|
||||||
}
|
|
||||||
f := point.Fields()["value"].(float64)
|
|
||||||
if point.Fields()["value"] != f {
|
|
||||||
t.Fatalf("floatValue value mismatch. expected %v, got %v", test.value, f)
|
|
||||||
}
|
|
||||||
if point.Time().UnixNano()/1000000 != test.time.UnixNano()/1000000 {
|
|
||||||
t.Fatalf("time value mismatch. expected %v, got %v", test.time.UnixNano(), point.Time().UnixNano())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestParseNaN verifies that a NaN value parses cleanly and round-trips
// through the point's fields.
func TestParseNaN(t *testing.T) {
	p, err := graphite.NewParser([]string{"measurement*"}, nil)
	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	pt, err := p.Parse("servers.localhost.cpu_load NaN 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	exp := tsdb.NewPoint("servers.localhost.cpu_load",
		tsdb.Tags{},
		tsdb.Fields{"value": math.NaN()},
		time.Unix(1435077219, 0))

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}

	// NaN != NaN, so the string comparison above can't confirm the value.
	if !math.IsNaN(pt.Fields()["value"].(float64)) {
		t.Errorf("parse value mismatch: expected NaN")
	}
}
|
|
||||||
|
|
||||||
// TestFilterMatchDefault verifies that a line matching no filter falls back
// to the raw metric name as the measurement.
func TestFilterMatchDefault(t *testing.T) {
	p, err := graphite.NewParser([]string{"servers.localhost .host.measurement*"}, nil)
	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := tsdb.NewPoint("miss.servers.localhost.cpu_load",
		tsdb.Tags{},
		tsdb.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("miss.servers.localhost.cpu_load 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
|
|
||||||
|
|
||||||
// TestFilterMatchMultipleMeasurement verifies that multiple "measurement"
// components are joined with the default separator.
func TestFilterMatchMultipleMeasurement(t *testing.T) {
	p, err := graphite.NewParser([]string{"servers.localhost .host.measurement.measurement*"}, nil)
	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := tsdb.NewPoint("cpu.cpu_load.10",
		tsdb.Tags{"host": "localhost"},
		tsdb.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.localhost.cpu.cpu_load.10 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
|
|
||||||
|
|
||||||
// TestFilterMatchMultipleMeasurementSeparator verifies that a custom
// Separator option is used when joining measurement components.
func TestFilterMatchMultipleMeasurementSeparator(t *testing.T) {
	p, err := graphite.NewParserWithOptions(graphite.Options{
		Templates: []string{"servers.localhost .host.measurement.measurement*"},
		Separator: "_",
	})
	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := tsdb.NewPoint("cpu_cpu_load_10",
		tsdb.Tags{"host": "localhost"},
		tsdb.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.localhost.cpu.cpu_load.10 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
|
|
||||||
|
|
||||||
// TestFilterMatchSingle verifies a basic exact-filter match extracts the
// host tag and measurement.
func TestFilterMatchSingle(t *testing.T) {
	p, err := graphite.NewParser([]string{"servers.localhost .host.measurement*"}, nil)
	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := tsdb.NewPoint("cpu_load",
		tsdb.Tags{"host": "localhost"},
		tsdb.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.localhost.cpu_load 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
|
|
||||||
|
|
||||||
// TestParseNoMatch verifies that a metric not matching any filter keeps its
// raw name and gets no tags.
func TestParseNoMatch(t *testing.T) {
	p, err := graphite.NewParser([]string{"servers.*.cpu .host.measurement.cpu.measurement"}, nil)
	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := tsdb.NewPoint("servers.localhost.memory.VmallocChunk",
		tsdb.Tags{},
		tsdb.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.localhost.memory.VmallocChunk 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
|
|
||||||
|
|
||||||
// TestFilterMatchWildcard verifies that a wildcard filter component matches
// an arbitrary metric path component.
func TestFilterMatchWildcard(t *testing.T) {
	p, err := graphite.NewParser([]string{"servers.* .host.measurement*"}, nil)
	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := tsdb.NewPoint("cpu_load",
		tsdb.Tags{"host": "localhost"},
		tsdb.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.localhost.cpu_load 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
|
|
||||||
|
|
||||||
// TestFilterMatchExactBeforeWildcard verifies that an exact filter wins
// over a wildcard filter covering the same metric.
func TestFilterMatchExactBeforeWildcard(t *testing.T) {
	p, err := graphite.NewParser([]string{
		"servers.* .wrong.measurement*",
		"servers.localhost .host.measurement*"}, nil)
	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := tsdb.NewPoint("cpu_load",
		tsdb.Tags{"host": "localhost"},
		tsdb.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.localhost.cpu_load 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
|
|
||||||
|
|
||||||
// TestFilterMatchMostLongestFilter verifies that the longest (most
// specific) matching filter is chosen among several candidates.
func TestFilterMatchMostLongestFilter(t *testing.T) {
	p, err := graphite.NewParser([]string{
		"*.* .wrong.measurement*",
		"servers.* .wrong.measurement*",
		"servers.localhost .wrong.measurement*",
		"servers.localhost.cpu .host.resource.measurement*", // should match this
		"*.localhost .wrong.measurement*",
	}, nil)

	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := tsdb.NewPoint("cpu_load",
		tsdb.Tags{"host": "localhost", "resource": "cpu"},
		tsdb.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.localhost.cpu.cpu_load 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
|
|
||||||
|
|
||||||
// TestFilterMatchMultipleWildcards verifies that the right wildcard filter
// is chosen when several wildcard filters could apply.
func TestFilterMatchMultipleWildcards(t *testing.T) {
	p, err := graphite.NewParser([]string{
		"*.* .wrong.measurement*",
		"servers.* .host.measurement*", // should match this
		"servers.localhost .wrong.measurement*",
		"*.localhost .wrong.measurement*",
	}, nil)

	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := tsdb.NewPoint("cpu_load",
		tsdb.Tags{"host": "server01"},
		tsdb.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.server01.cpu_load 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
|
|
||||||
|
|
||||||
// TestParseDefaultTags verifies that parser-level default tags are applied
// without overriding tags extracted from the metric itself.
func TestParseDefaultTags(t *testing.T) {
	p, err := graphite.NewParser([]string{"servers.localhost .host.measurement*"}, tsdb.Tags{
		"region": "us-east",
		"zone":   "1c",
		"host":   "should not set",
	})
	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := tsdb.NewPoint("cpu_load",
		tsdb.Tags{"host": "localhost", "region": "us-east", "zone": "1c"},
		tsdb.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.localhost.cpu_load 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
|
|
||||||
|
|
||||||
// TestParseDefaultTemplateTags verifies that per-template default tags
// (the "zone=1c" suffix) combine with parser-level default tags.
func TestParseDefaultTemplateTags(t *testing.T) {
	p, err := graphite.NewParser([]string{"servers.localhost .host.measurement* zone=1c"}, tsdb.Tags{
		"region": "us-east",
		"host":   "should not set",
	})
	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := tsdb.NewPoint("cpu_load",
		tsdb.Tags{"host": "localhost", "region": "us-east", "zone": "1c"},
		tsdb.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.localhost.cpu_load 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
|
|
||||||
|
|
||||||
// TestParseDefaultTemplateTagsOverridGlobal verifies that template-level
// default tags take precedence over parser-level default tags.
func TestParseDefaultTemplateTagsOverridGlobal(t *testing.T) {
	p, err := graphite.NewParser([]string{"servers.localhost .host.measurement* zone=1c,region=us-east"}, tsdb.Tags{
		"region": "shot not be set",
		"host":   "should not set",
	})
	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := tsdb.NewPoint("cpu_load",
		tsdb.Tags{"host": "localhost", "region": "us-east", "zone": "1c"},
		tsdb.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.localhost.cpu_load 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
|
|
||||||
|
|
||||||
// TestParseTemplateWhitespace checks template parsing in the presence of
// whitespace in the template string.
// NOTE(review): as written this test appears identical to
// TestParseDefaultTemplateTags — any extra whitespace the template was
// meant to contain may have been lost; verify against upstream.
func TestParseTemplateWhitespace(t *testing.T) {
	p, err := graphite.NewParser([]string{"servers.localhost .host.measurement* zone=1c"}, tsdb.Tags{
		"region": "us-east",
		"host":   "should not set",
	})
	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := tsdb.NewPoint("cpu_load",
		tsdb.Tags{"host": "localhost", "region": "us-east", "zone": "1c"},
		tsdb.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.localhost.cpu_load 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
|
|
267
Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/service.go
generated
vendored
267
Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/service.go
generated
vendored
|
@ -1,267 +0,0 @@
|
||||||
package graphite
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"math"
|
|
||||||
"net"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/cluster"
|
|
||||||
"github.com/influxdb/influxdb/meta"
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
udpBufferSize = 65536
|
|
||||||
leaderWaitTimeout = 30 * time.Second
|
|
||||||
)
|
|
||||||
|
|
||||||
// Service is the Graphite input: it listens on TCP or UDP, parses incoming
// lines into points, batches them, and writes batches via PointsWriter.
type Service struct {
	bindAddress  string // address to listen on
	database     string // target database for incoming points
	protocol     string // "tcp" or "udp"
	batchSize    int    // flush when this many points are pending
	batchTimeout time.Duration
	consistencyLevel cluster.ConsistencyLevel

	batcher *tsdb.PointBatcher // groups parsed points into write batches
	parser  *Parser            // translates graphite lines into points

	logger *log.Logger

	ln   net.Listener // TCP listener (nil in UDP mode or before Open)
	addr net.Addr     // resolved listen address, set by Open

	wg   sync.WaitGroup // tracks listener/connection/batch goroutines
	done chan struct{}  // closed by Close to stop processBatches

	// PointsWriter is the destination for batched points.
	PointsWriter interface {
		WritePoints(p *cluster.WritePointsRequest) error
	}
	// MetaStore supplies the cluster metadata operations Open requires.
	MetaStore interface {
		WaitForLeader(d time.Duration) error
		CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error)
	}
}
|
|
||||||
|
|
||||||
// NewService returns an instance of the Graphite service.
// It validates the configured consistency level and templates; the
// returned service still needs PointsWriter and MetaStore assigned
// before Open is called.
func NewService(c Config) (*Service, error) {
	// Use defaults where necessary.
	d := c.WithDefaults()

	s := Service{
		bindAddress:  d.BindAddress,
		database:     d.Database,
		protocol:     d.Protocol,
		batchSize:    d.BatchSize,
		batchTimeout: time.Duration(d.BatchTimeout),
		logger:       log.New(os.Stderr, "[graphite] ", log.LstdFlags),
		done:         make(chan struct{}),
	}

	// Reject invalid consistency levels up front.
	consistencyLevel, err := cluster.ParseConsistencyLevel(d.ConsistencyLevel)
	if err != nil {
		return nil, err
	}
	s.consistencyLevel = consistencyLevel

	// Build the line parser from the configured templates.
	parser, err := NewParserWithOptions(Options{
		Templates:   d.Templates,
		DefaultTags: d.DefaultTags(),
		Separator:   d.Separator})

	if err != nil {
		return nil, err
	}
	s.parser = parser

	return &s, nil
}
|
|
||||||
|
|
||||||
// Open starts the Graphite input processing data.
// It waits for a cluster leader, ensures the target database exists,
// starts the point batcher and batch-processing goroutine, then begins
// listening on the configured protocol.
func (s *Service) Open() error {
	s.logger.Printf("Starting graphite service, batch size %d, batch timeout %s", s.batchSize, s.batchTimeout)

	if err := s.MetaStore.WaitForLeader(leaderWaitTimeout); err != nil {
		s.logger.Printf("Failed to detect a cluster leader: %s", err.Error())
		return err
	}

	if _, err := s.MetaStore.CreateDatabaseIfNotExists(s.database); err != nil {
		s.logger.Printf("Failed to ensure target database %s exists: %s", s.database, err.Error())
		return err
	}

	s.batcher = tsdb.NewPointBatcher(s.batchSize, s.batchTimeout)
	s.batcher.Start()

	// Start processing batches.
	s.wg.Add(1)
	go s.processBatches(s.batcher)

	var err error
	if strings.ToLower(s.protocol) == "tcp" {
		s.addr, err = s.openTCPServer()
	} else if strings.ToLower(s.protocol) == "udp" {
		s.addr, err = s.openUDPServer()
	} else {
		return fmt.Errorf("unrecognized Graphite input protocol %s", s.protocol)
	}
	if err != nil {
		return err
	}

	s.logger.Printf("Listening on %s: %s", strings.ToUpper(s.protocol), s.addr.String())
	return nil
}
|
|
||||||
|
|
||||||
// Close stops all data processing on the Graphite input.
|
|
||||||
func (s *Service) Close() error {
|
|
||||||
if s.ln != nil {
|
|
||||||
s.ln.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
s.batcher.Stop()
|
|
||||||
close(s.done)
|
|
||||||
s.wg.Wait()
|
|
||||||
s.done = nil
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetLogger sets the internal logger to the logger passed in.
|
|
||||||
func (s *Service) SetLogger(l *log.Logger) {
|
|
||||||
s.logger = l
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Service) Addr() net.Addr {
|
|
||||||
return s.addr
|
|
||||||
}
|
|
||||||
|
|
||||||
// openTCPServer opens the Graphite input in TCP mode and starts processing data.
// It returns the bound listener address; connections are accepted on a
// background goroutine until the listener is closed.
func (s *Service) openTCPServer() (net.Addr, error) {
	ln, err := net.Listen("tcp", s.bindAddress)
	if err != nil {
		return nil, err
	}
	s.ln = ln

	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		for {
			conn, err := s.ln.Accept()
			// A non-temporary network error means the listener was closed;
			// exit the accept loop.
			if opErr, ok := err.(*net.OpError); ok && !opErr.Temporary() {
				s.logger.Println("graphite TCP listener closed")
				return
			}
			if err != nil {
				s.logger.Println("error accepting TCP connection", err.Error())
				continue
			}

			// Each connection is serviced on its own goroutine; the handler
			// calls s.wg.Done() when the connection ends.
			s.wg.Add(1)
			go s.handleTCPConnection(conn)
		}
	}()
	return ln.Addr(), nil
}
|
|
||||||
|
|
||||||
// handleTCPConnection services an individual TCP connection for the Graphite input.
|
|
||||||
func (s *Service) handleTCPConnection(conn net.Conn) {
|
|
||||||
defer conn.Close()
|
|
||||||
defer s.wg.Done()
|
|
||||||
|
|
||||||
reader := bufio.NewReader(conn)
|
|
||||||
|
|
||||||
for {
|
|
||||||
// Read up to the next newline.
|
|
||||||
buf, err := reader.ReadBytes('\n')
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Trim the buffer, even though there should be no padding
|
|
||||||
line := strings.TrimSpace(string(buf))
|
|
||||||
|
|
||||||
s.handleLine(line)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// openUDPServer opens the Graphite input in UDP mode and starts processing incoming data.
// It returns the bound local address; datagrams are consumed on a background
// goroutine until a read error (e.g. the socket being closed) ends the loop.
func (s *Service) openUDPServer() (net.Addr, error) {
	addr, err := net.ResolveUDPAddr("udp", s.bindAddress)
	if err != nil {
		return nil, err
	}

	conn, err := net.ListenUDP("udp", addr)
	if err != nil {
		return nil, err
	}

	// Single reusable receive buffer; reads are sequential on one goroutine.
	buf := make([]byte, udpBufferSize)
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		for {
			n, _, err := conn.ReadFromUDP(buf)
			if err != nil {
				conn.Close()
				return
			}
			// A single datagram may carry multiple newline-separated lines.
			for _, line := range strings.Split(string(buf[:n]), "\n") {
				s.handleLine(line)
			}
		}
	}()
	return conn.LocalAddr(), nil
}
|
|
||||||
|
|
||||||
// handleLine parses a single Graphite line and queues the resulting point on
// the batcher. Blank lines, unparseable lines, and NaN/Inf float values are
// logged (where applicable) and dropped.
func (s *Service) handleLine(line string) {
	if line == "" {
		return
	}
	// Parse it.
	point, err := s.parser.Parse(line)
	if err != nil {
		s.logger.Printf("unable to parse line: %s", err)
		return
	}

	// Only float64 "value" fields are range-checked; other field types pass
	// straight through.
	f, ok := point.Fields()["value"].(float64)
	if ok {
		// Drop NaN and +/-Inf data points since they are not supported values
		if math.IsNaN(f) || math.IsInf(f, 0) {
			s.logger.Printf("dropping unsupported value: '%v'", line)
			return
		}
	}

	s.batcher.In() <- point
}
|
|
||||||
|
|
||||||
// processBatches continually drains the given batcher and writes the batches to the database.
// It runs until s.done is closed and signals s.wg on exit. Failed batches are
// logged and dropped, not retried.
func (s *Service) processBatches(batcher *tsdb.PointBatcher) {
	defer s.wg.Done()
	for {
		select {
		case batch := <-batcher.Out():
			// Write the batch at the service's configured consistency level;
			// an empty retention policy selects the database default.
			if err := s.PointsWriter.WritePoints(&cluster.WritePointsRequest{
				Database:         s.database,
				RetentionPolicy:  "",
				ConsistencyLevel: s.consistencyLevel,
				Points:           batch,
			}); err != nil {
				s.logger.Printf("failed to write point batch to database %q: %s", s.database, err)
			}
		case <-s.done:
			return
		}
	}
}
|
|
183
Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/service_test.go
generated
vendored
183
Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/service_test.go
generated
vendored
|
@ -1,183 +0,0 @@
|
||||||
package graphite_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"sync"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/cluster"
|
|
||||||
"github.com/influxdb/influxdb/meta"
|
|
||||||
"github.com/influxdb/influxdb/services/graphite"
|
|
||||||
"github.com/influxdb/influxdb/toml"
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
func Test_ServerGraphiteTCP(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
now := time.Now().UTC().Round(time.Second)
|
|
||||||
|
|
||||||
config := graphite.NewConfig()
|
|
||||||
config.Database = "graphitedb"
|
|
||||||
config.BatchSize = 0 // No batching.
|
|
||||||
config.BatchTimeout = toml.Duration(time.Second)
|
|
||||||
config.BindAddress = ":0"
|
|
||||||
|
|
||||||
service, err := graphite.NewService(config)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to create Graphite service: %s", err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Allow test to wait until points are written.
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
wg.Add(1)
|
|
||||||
|
|
||||||
pointsWriter := PointsWriter{
|
|
||||||
WritePointsFn: func(req *cluster.WritePointsRequest) error {
|
|
||||||
defer wg.Done()
|
|
||||||
|
|
||||||
if req.Database != "graphitedb" {
|
|
||||||
t.Fatalf("unexpected database: %s", req.Database)
|
|
||||||
} else if req.RetentionPolicy != "" {
|
|
||||||
t.Fatalf("unexpected retention policy: %s", req.RetentionPolicy)
|
|
||||||
} else if req.Points[0].String() !=
|
|
||||||
tsdb.NewPoint(
|
|
||||||
"cpu",
|
|
||||||
map[string]string{},
|
|
||||||
map[string]interface{}{"value": 23.456},
|
|
||||||
time.Unix(now.Unix(), 0)).String() {
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
},
|
|
||||||
}
|
|
||||||
service.PointsWriter = &pointsWriter
|
|
||||||
dbCreator := DatabaseCreator{}
|
|
||||||
service.MetaStore = &dbCreator
|
|
||||||
|
|
||||||
if err := service.Open(); err != nil {
|
|
||||||
t.Fatalf("failed to open Graphite service: %s", err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
if !dbCreator.Created {
|
|
||||||
t.Fatalf("failed to create target database")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Connect to the graphite endpoint we just spun up
|
|
||||||
_, port, _ := net.SplitHostPort(service.Addr().String())
|
|
||||||
conn, err := net.Dial("tcp", "127.0.0.1:"+port)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
data := []byte(`cpu 23.456 `)
|
|
||||||
data = append(data, []byte(fmt.Sprintf("%d", now.Unix()))...)
|
|
||||||
data = append(data, '\n')
|
|
||||||
_, err = conn.Write(data)
|
|
||||||
conn.Close()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test_ServerGraphiteUDP verifies that a metric datagram sent to the Graphite
// UDP input reaches the PointsWriter with the expected database, retention
// policy, and point data.
func Test_ServerGraphiteUDP(t *testing.T) {
	t.Parallel()

	now := time.Now().UTC().Round(time.Second)

	config := graphite.NewConfig()
	config.Database = "graphitedb"
	config.BatchSize = 0 // No batching.
	config.BatchTimeout = toml.Duration(time.Second)
	config.BindAddress = ":10000"
	config.Protocol = "udp"

	service, err := graphite.NewService(config)
	if err != nil {
		t.Fatalf("failed to create Graphite service: %s", err.Error())
	}

	// Allow test to wait until points are written.
	var wg sync.WaitGroup
	wg.Add(1)

	pointsWriter := PointsWriter{
		WritePointsFn: func(req *cluster.WritePointsRequest) error {
			defer wg.Done()

			// Assert the write carries exactly the data sent below.
			if req.Database != "graphitedb" {
				t.Fatalf("unexpected database: %s", req.Database)
			} else if req.RetentionPolicy != "" {
				t.Fatalf("unexpected retention policy: %s", req.RetentionPolicy)
			} else if req.Points[0].String() !=
				tsdb.NewPoint(
					"cpu",
					map[string]string{},
					map[string]interface{}{"value": 23.456},
					time.Unix(now.Unix(), 0)).String() {
				t.Fatalf("unexpected points: %#v", req.Points[0].String())
			}
			return nil
		},
	}
	service.PointsWriter = &pointsWriter
	dbCreator := DatabaseCreator{}
	service.MetaStore = &dbCreator

	if err := service.Open(); err != nil {
		t.Fatalf("failed to open Graphite service: %s", err.Error())
	}

	if !dbCreator.Created {
		t.Fatalf("failed to create target database")
	}

	// Connect to the graphite endpoint we just spun up
	_, port, _ := net.SplitHostPort(service.Addr().String())
	conn, err := net.Dial("udp", "127.0.0.1:"+port)
	if err != nil {
		t.Fatal(err)
	}
	// Send "cpu 23.456 <unix-timestamp>\n" as a single datagram.
	data := []byte(`cpu 23.456 `)
	data = append(data, []byte(fmt.Sprintf("%d", now.Unix()))...)
	data = append(data, '\n')
	_, err = conn.Write(data)
	if err != nil {
		t.Fatal(err)
	}

	wg.Wait()
	conn.Close()
}
|
|
||||||
|
|
||||||
// PointsWriter represents a mock impl of PointsWriter.
type PointsWriter struct {
	// WritePointsFn is invoked for every WritePoints call, letting each test
	// supply its own assertions.
	WritePointsFn func(*cluster.WritePointsRequest) error
}

// WritePoints delegates to the test-supplied WritePointsFn.
func (w *PointsWriter) WritePoints(p *cluster.WritePointsRequest) error {
	return w.WritePointsFn(p)
}
|
|
||||||
|
|
||||||
// DatabaseCreator is a mock MetaStore that records whether database creation
// was requested.
type DatabaseCreator struct {
	// Created is set once CreateDatabaseIfNotExists has been called.
	Created bool
}

// CreateDatabaseIfNotExists marks the creator as used; it returns no
// database info.
func (d *DatabaseCreator) CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error) {
	d.Created = true
	return nil, nil
}

// WaitForLeader reports a leader immediately, regardless of timeout.
func (d *DatabaseCreator) WaitForLeader(t time.Duration) error {
	return nil
}
|
|
||||||
|
|
||||||
// Test Helpers
|
|
||||||
func errstr(err error) string {
|
|
||||||
if err != nil {
|
|
||||||
return err.Error()
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
|
@ -1,44 +0,0 @@
|
||||||
package hh
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/toml"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Defaults for the hinted handoff configuration.
const (
	// DefaultMaxSize is the default maximum size of all hinted handoff queues in bytes.
	DefaultMaxSize = 1024 * 1024 * 1024

	// DefaultMaxAge is the default maximum amount of time that a hinted handoff write
	// can stay in the queue. After this time, the write will be purged.
	DefaultMaxAge = 7 * 24 * time.Hour

	// DefaultRetryRateLimit is the default rate that hinted handoffs will be retried.
	// The rate is in bytes per second and applies across all nodes when retried. A
	// value of 0 disables the rate limit.
	DefaultRetryRateLimit = 0

	// DefaultRetryInterval is the default amount of time the system waits before
	// attempting to flush hinted handoff queues.
	DefaultRetryInterval = time.Second
)
|
|
||||||
|
|
||||||
// Config controls the hinted handoff service.
type Config struct {
	Enabled        bool          `toml:"enabled"`          // whether hinted handoff is active
	Dir            string        `toml:"dir"`              // directory where queue data is stored
	MaxSize        int64         `toml:"max-size"`         // max bytes across all queues
	MaxAge         toml.Duration `toml:"max-age"`          // max time a write may stay queued
	RetryRateLimit int64         `toml:"retry-rate-limit"` // retry rate in bytes/sec (0 = unlimited)
	RetryInterval  toml.Duration `toml:"retry-interval"`   // wait between queue flush attempts
}
|
|
||||||
|
|
||||||
func NewConfig() Config {
|
|
||||||
return Config{
|
|
||||||
Enabled: true,
|
|
||||||
MaxSize: DefaultMaxSize,
|
|
||||||
MaxAge: toml.Duration(DefaultMaxAge),
|
|
||||||
RetryRateLimit: DefaultRetryRateLimit,
|
|
||||||
RetryInterval: toml.Duration(DefaultRetryInterval),
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,45 +0,0 @@
|
||||||
package hh_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/BurntSushi/toml"
|
|
||||||
"github.com/influxdb/influxdb/services/hh"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestConfigParse(t *testing.T) {
|
|
||||||
// Parse configuration.
|
|
||||||
var c hh.Config
|
|
||||||
if _, err := toml.Decode(`
|
|
||||||
enabled = false
|
|
||||||
retry-interval = "10m"
|
|
||||||
max-size=2048
|
|
||||||
max-age="20m"
|
|
||||||
retry-rate-limit=1000
|
|
||||||
`, &c); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate configuration.
|
|
||||||
if exp := true; c.Enabled == true {
|
|
||||||
t.Fatalf("unexpected enabled: got %v, exp %v", c.Enabled, exp)
|
|
||||||
}
|
|
||||||
|
|
||||||
if exp := 10 * time.Minute; c.RetryInterval.String() != exp.String() {
|
|
||||||
t.Fatalf("unexpected retry interval: got %v, exp %v", c.RetryInterval, exp)
|
|
||||||
}
|
|
||||||
|
|
||||||
if exp := 20 * time.Minute; c.MaxAge.String() != exp.String() {
|
|
||||||
t.Fatalf("unexpected max age: got %v, exp %v", c.MaxAge, exp)
|
|
||||||
}
|
|
||||||
|
|
||||||
if exp := int64(2048); c.MaxSize != exp {
|
|
||||||
t.Fatalf("unexpected retry interval: got %v, exp %v", c.MaxSize, exp)
|
|
||||||
}
|
|
||||||
|
|
||||||
if exp := int64(1000); c.RetryRateLimit != exp {
|
|
||||||
t.Fatalf("unexpected retry rate limit: got %v, exp %v", c.RetryRateLimit, exp)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
|
@ -1,5 +0,0 @@
|
||||||
/*
Package hh implements a hinted handoff for writes.
*/
package hh
|
|
|
@ -1,61 +0,0 @@
|
||||||
package hh
|
|
||||||
|
|
||||||
import "time"
|
|
||||||
|
|
||||||
// limiter tracks an amount consumed against a configured per-second limit and
// suggests how long callers should pause to stay at that rate.
type limiter struct {
	count int64     // total amount recorded via Update
	limit int64     // maximum amount allowed per second; <= 0 disables limiting
	start time.Time // when measurement began
	delay float64   // current suggested delay, in seconds
}
|
|
||||||
|
|
||||||
// NewRateLimiter returns a new limiter configured to restrict a process to the limit per second.
|
|
||||||
// limit is the maximum amount that can be used per second. The limit should be > 0. A limit
|
|
||||||
// <= 0, will not limit the processes.
|
|
||||||
func NewRateLimiter(limit int64) *limiter {
|
|
||||||
return &limiter{
|
|
||||||
start: time.Now(),
|
|
||||||
limit: limit,
|
|
||||||
delay: 0.5,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update updates the amount used
|
|
||||||
func (t *limiter) Update(count int) {
|
|
||||||
t.count += int64(count)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delay returns the amount of time, up to 1 second, that caller should wait
|
|
||||||
// to maintain the configured rate
|
|
||||||
func (t *limiter) Delay() time.Duration {
|
|
||||||
if t.limit > 0 {
|
|
||||||
|
|
||||||
delta := time.Now().Sub(t.start).Seconds()
|
|
||||||
rate := int64(float64(t.count) / delta)
|
|
||||||
|
|
||||||
// Determine how far off from the max rate we are
|
|
||||||
delayAdj := float64((t.limit - rate)) / float64(t.limit)
|
|
||||||
|
|
||||||
// Don't adjust by more than 1 second at a time
|
|
||||||
delayAdj = t.clamp(delayAdj, -1, 1)
|
|
||||||
|
|
||||||
t.delay -= delayAdj
|
|
||||||
if t.delay < 0 {
|
|
||||||
t.delay = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
return time.Duration(t.delay) * time.Second
|
|
||||||
}
|
|
||||||
return time.Duration(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *limiter) clamp(value, min, max float64) float64 {
|
|
||||||
if value < min {
|
|
||||||
return min
|
|
||||||
}
|
|
||||||
|
|
||||||
if value > max {
|
|
||||||
return max
|
|
||||||
}
|
|
||||||
return value
|
|
||||||
}
|
|
47
Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/limiter_test.go
generated
vendored
47
Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/limiter_test.go
generated
vendored
|
@ -1,47 +0,0 @@
|
||||||
package hh
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestLimiter checks that a limiter with limiting disabled (limit 0) never
// suggests a delay.
func TestLimiter(t *testing.T) {
	l := NewRateLimiter(0)
	l.Update(500)
	if l.Delay().Nanoseconds() != 0 {
		t.Errorf("limiter with no limit mismatch: got %v, exp 0", l.Delay())
	}
}
|
|
||||||
|
|
||||||
func TestLimiterWithinLimit(t *testing.T) {
|
|
||||||
if testing.Short() {
|
|
||||||
t.Skip("Shipping TestLimiterWithinLimit")
|
|
||||||
}
|
|
||||||
|
|
||||||
l := NewRateLimiter(1000)
|
|
||||||
for i := 0; i < 100; i++ {
|
|
||||||
// 50 ever 100ms = 500/s which should be within the rate
|
|
||||||
l.Update(50)
|
|
||||||
l.Delay()
|
|
||||||
time.Sleep(100 * time.Millisecond)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Should not have any delay
|
|
||||||
delay := l.Delay().Seconds()
|
|
||||||
if exp := int(0); int(delay) != exp {
|
|
||||||
t.Errorf("limiter rate mismatch: got %v, exp %v", int(delay), exp)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestLimiterExceeded checks that consuming far beyond the configured rate
// produces a non-zero suggested delay.
func TestLimiterExceeded(t *testing.T) {
	l := NewRateLimiter(1000)
	for i := 0; i < 10; i++ {
		l.Update(200)
		l.Delay()
	}
	delay := l.Delay().Seconds()
	if int(delay) == 0 {
		t.Errorf("limiter rate mismatch. expected non-zero delay")
	}
}
|
|
|
@ -1,218 +0,0 @@
|
||||||
package hh
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strconv"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Processor replays hinted-handoff writes: it keeps one on-disk queue per
// destination node and drains those queues through the shard writer.
type Processor struct {
	mu sync.RWMutex // guards queues

	dir            string        // root directory; one subdirectory per node ID
	maxSize        int64         // maximum queue size in bytes
	maxAge         time.Duration // maximum age of a queued write
	retryRateLimit int64         // replay rate limit in bytes/sec (0 = unlimited)

	queues map[uint64]*queue // per-node queues, keyed by node ID
	writer shardWriter       // destination for replayed writes
	Logger *log.Logger
}
|
|
||||||
|
|
||||||
// ProcessorOptions carries optional Processor settings; zero values fall back
// to the package defaults (see setOptions).
type ProcessorOptions struct {
	MaxSize        int64
	RetryRateLimit int64
}
|
|
||||||
|
|
||||||
func NewProcessor(dir string, writer shardWriter, options ProcessorOptions) (*Processor, error) {
|
|
||||||
p := &Processor{
|
|
||||||
dir: dir,
|
|
||||||
queues: map[uint64]*queue{},
|
|
||||||
writer: writer,
|
|
||||||
Logger: log.New(os.Stderr, "[handoff] ", log.LstdFlags),
|
|
||||||
}
|
|
||||||
p.setOptions(options)
|
|
||||||
|
|
||||||
// Create the root directory if it doesn't already exist.
|
|
||||||
if err := os.MkdirAll(dir, 0700); err != nil {
|
|
||||||
return nil, fmt.Errorf("mkdir all: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := p.loadQueues(); err != nil {
|
|
||||||
return p, err
|
|
||||||
}
|
|
||||||
return p, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Processor) setOptions(options ProcessorOptions) {
|
|
||||||
p.maxSize = DefaultMaxSize
|
|
||||||
if options.MaxSize != 0 {
|
|
||||||
p.maxSize = options.MaxSize
|
|
||||||
}
|
|
||||||
|
|
||||||
p.retryRateLimit = DefaultRetryRateLimit
|
|
||||||
if options.RetryRateLimit != 0 {
|
|
||||||
p.retryRateLimit = options.RetryRateLimit
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Processor) loadQueues() error {
|
|
||||||
files, err := ioutil.ReadDir(p.dir)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, file := range files {
|
|
||||||
nodeID, err := strconv.ParseUint(file.Name(), 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := p.addQueue(nodeID); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// addQueue creates and opens the on-disk queue for nodeID, registering it in
// p.queues under the lock. The queue directory is <p.dir>/<nodeID>.
func (p *Processor) addQueue(nodeID uint64) (*queue, error) {
	p.mu.Lock()
	defer p.mu.Unlock()

	path := filepath.Join(p.dir, strconv.FormatUint(nodeID, 10))
	if err := os.MkdirAll(path, 0700); err != nil {
		return nil, err
	}

	queue, err := newQueue(path, p.maxSize)
	if err != nil {
		return nil, err
	}
	if err := queue.Open(); err != nil {
		return nil, err
	}
	p.queues[nodeID] = queue
	return queue, nil
}
|
|
||||||
|
|
||||||
// WriteShard queues a write destined for shard shardID on node ownerID,
// appending the marshaled form to that node's on-disk queue and creating the
// queue on first use.
//
// NOTE(review): p.queues is read here without holding p.mu while addQueue
// mutates the map under the lock — this looks racy if WriteShard is called
// concurrently; confirm callers serialize access.
func (p *Processor) WriteShard(shardID, ownerID uint64, points []tsdb.Point) error {
	queue, ok := p.queues[ownerID]
	if !ok {
		var err error
		if queue, err = p.addQueue(ownerID); err != nil {
			return err
		}
	}

	b := p.marshalWrite(shardID, points)
	return queue.Append(b)
}
|
|
||||||
|
|
||||||
// Process makes one replay pass over every queue concurrently, sending queued
// writes to their destination nodes at the configured rate limit. It returns
// the first error reported by any queue's goroutine.
func (p *Processor) Process() error {
	p.mu.RLock()
	defer p.mu.RUnlock()

	// Buffered so goroutines never block reporting their result.
	res := make(chan error, len(p.queues))
	for nodeID, q := range p.queues {
		go func(nodeID uint64, q *queue) {

			// Log how many writes we successfully sent at the end
			var sent int
			start := time.Now()
			defer func(start time.Time) {
				if sent > 0 {
					p.Logger.Printf("%d queued writes sent to node %d in %s", sent, nodeID, time.Since(start))
				}
			}(start)

			limiter := NewRateLimiter(p.retryRateLimit)
			for {
				// Get the current block from the queue
				buf, err := q.Current()
				if err != nil {
					// Nothing left to read (or a read failure): this queue's
					// pass is done.
					res <- nil
					break
				}

				// unmarshal the byte slice back to shard ID and points
				shardID, points, err := p.unmarshalWrite(buf)
				if err != nil {
					// Corrupt entry: log it, skip past it, and end this pass.
					// NOTE(review): if q.Advance() succeeds here nothing is
					// sent on res, which would leave the collector loop below
					// waiting — confirm intended behavior.
					p.Logger.Printf("unmarshal write failed: %v", err)
					if err := q.Advance(); err != nil {
						res <- err
					}
					return
				}

				// Try to send the write to the node
				if err := p.writer.WriteShard(shardID, nodeID, points); err != nil && tsdb.IsRetryable(err) {
					// Retryable failure: leave the entry queued for a later pass.
					p.Logger.Printf("remote write failed: %v", err)
					res <- nil
					break
				}

				// If we get here, the write succeeded so advance the queue to the next item
				if err := q.Advance(); err != nil {
					res <- err
					return
				}

				sent += 1

				// Update how many bytes we've sent
				limiter.Update(len(buf))

				// Block to maintain the throughput rate
				time.Sleep(limiter.Delay())

			}
		}(nodeID, q)
	}

	// Collect one result per queue; return the first failure.
	for range p.queues {
		err := <-res
		if err != nil {
			return err
		}
	}
	return nil
}
|
|
||||||
|
|
||||||
func (p *Processor) marshalWrite(shardID uint64, points []tsdb.Point) []byte {
|
|
||||||
b := make([]byte, 8)
|
|
||||||
binary.BigEndian.PutUint64(b, shardID)
|
|
||||||
for _, p := range points {
|
|
||||||
b = append(b, []byte(p.String())...)
|
|
||||||
b = append(b, '\n')
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Processor) unmarshalWrite(b []byte) (uint64, []tsdb.Point, error) {
|
|
||||||
if len(b) < 8 {
|
|
||||||
return 0, nil, fmt.Errorf("too short: len = %d", len(b))
|
|
||||||
}
|
|
||||||
ownerID := binary.BigEndian.Uint64(b[:8])
|
|
||||||
points, err := tsdb.ParsePoints(b[8:])
|
|
||||||
return ownerID, points, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Processor) PurgeOlderThan(when time.Duration) error {
|
|
||||||
p.mu.Lock()
|
|
||||||
defer p.mu.Unlock()
|
|
||||||
|
|
||||||
for _, queue := range p.queues {
|
|
||||||
if err := queue.PurgeOlderThan(time.Now().Add(-when)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
80
Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/processor_test.go
generated
vendored
80
Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/processor_test.go
generated
vendored
|
@ -1,80 +0,0 @@
|
||||||
package hh
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io/ioutil"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
// fakeShardWriter is a shardWriter stub whose behavior is supplied per test.
type fakeShardWriter struct {
	ShardWriteFn func(shardID, nodeID uint64, points []tsdb.Point) error
}

// WriteShard delegates to the test-supplied ShardWriteFn.
func (f *fakeShardWriter) WriteShard(shardID, nodeID uint64, points []tsdb.Point) error {
	return f.ShardWriteFn(shardID, nodeID, points)
}
|
|
||||||
|
|
||||||
// TestProcessorProcess verifies that a write queued via WriteShard is replayed
// exactly once by Process, with the queued shard ID, node ID, and point data
// intact, and that a second Process pass over the drained queue sends nothing.
//
// NOTE(review): the temp dir is never removed (no defer os.RemoveAll(dir)) —
// each run leaks a directory; confirm whether cleanup is intentional.
func TestProcessorProcess(t *testing.T) {
	dir, err := ioutil.TempDir("", "processor_test")
	if err != nil {
		t.Fatalf("failed to create temp dir: %v", err)
	}

	// expected data to be queue and sent to the shardWriter
	var expShardID, expNodeID, count = uint64(100), uint64(200), 0
	pt := tsdb.NewPoint("cpu", tsdb.Tags{"foo": "bar"}, tsdb.Fields{"value": 1.0}, time.Unix(0, 0))

	sh := &fakeShardWriter{
		ShardWriteFn: func(shardID, nodeID uint64, points []tsdb.Point) error {
			count += 1
			if shardID != expShardID {
				t.Errorf("Process() shardID mismatch: got %v, exp %v", shardID, expShardID)
			}
			if nodeID != expNodeID {
				t.Errorf("Process() nodeID mismatch: got %v, exp %v", nodeID, expNodeID)
			}

			if exp := 1; len(points) != exp {
				t.Fatalf("Process() points mismatch: got %v, exp %v", len(points), exp)
			}

			if points[0].String() != pt.String() {
				t.Fatalf("Process() points mismatch:\n got %v\n exp %v", points[0].String(), pt.String())
			}

			return nil
		},
	}

	p, err := NewProcessor(dir, sh, ProcessorOptions{MaxSize: 1024})
	if err != nil {
		t.Fatalf("Process() failed to create processor: %v", err)
	}

	// This should queue the writes
	if err := p.WriteShard(expShardID, expNodeID, []tsdb.Point{pt}); err != nil {
		t.Fatalf("Process() failed to write points: %v", err)
	}

	// This should send the write to the shard writer
	if err := p.Process(); err != nil {
		t.Fatalf("Process() failed to write points: %v", err)
	}

	if exp := 1; count != exp {
		t.Fatalf("Process() write count mismatch: got %v, exp %v", count, exp)
	}

	// Queue should be empty so no writes should be send again
	if err := p.Process(); err != nil {
		t.Fatalf("Process() failed to write points: %v", err)
	}

	// Count should stay the same
	if exp := 1; count != exp {
		t.Fatalf("Process() write count mismatch: got %v, exp %v", count, exp)
	}

}
|
|
|
@ -1,666 +0,0 @@
|
||||||
package hh
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strconv"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
	// ErrNotOpen is returned when operating on a queue that has not been opened.
	ErrNotOpen = fmt.Errorf("queue not open")
	// ErrQueueFull is returned when an append would exceed the queue's max size.
	ErrQueueFull = fmt.Errorf("queue is full")
	// ErrSegmentFull is returned when a segment cannot accept another write.
	ErrSegmentFull = fmt.Errorf("segment is full")
)

const (
	// defaultSegmentSize is the segment file size at which a new segment is started.
	defaultSegmentSize = 10 * 1024 * 1024
	// footerSize is the size in bytes of a segment file's footer.
	footerSize = 8
)
|
|
||||||
|
|
||||||
// queue is a bounded, disk-backed, append-only type that combines queue and
// log semantics. byte slices can be appended and read back in-order.
// The queue maintains a pointer to the current head
// byte slice and can re-read from the head until it has been advanced.
//
// Internally, the queue writes byte slices to multiple segment files so
// that disk space can be reclaimed. When a segment file is larger than
// the max segment size, a new file is created. Segments are removed
// after their head pointer has advanced past the last entry. The first
// segment is the head, and the last segment is the tail. Reads are from
// the head segment and writes go to the tail segment.
//
// queues can have a max size configured such that when the size of all
// segments on disk exceeds the size, write will fail.
//
// ┌─────┐
// │Head │
// ├─────┘
// │
// ▼
// ┌─────────────────┐ ┌─────────────────┐┌─────────────────┐
// │Segment 1 - 10MB │ │Segment 2 - 10MB ││Segment 3 - 10MB │
// └─────────────────┘ └─────────────────┘└─────────────────┘
//                                        ▲
//                                        │
//                                        │
//                                   ┌─────┐
//                                   │Tail │
//                                   └─────┘
type queue struct {
	mu sync.RWMutex

	// Directory to create segments
	dir string

	// The head and tail segments. Reads are from the beginning of head,
	// writes are appended to the tail.
	head, tail *segment

	// The maximum size in bytes of a segment file before a new one should be created
	maxSegmentSize int64

	// The maximum size allowed in bytes of all segments before writes will return
	// an error
	maxSize int64

	// The segments that exist on disk
	segments segments
}

// segments is the ordered list of a queue's on-disk segments.
type segments []*segment
|
|
||||||
|
|
||||||
// newQueue create a queue that will store segments in dir and that will
|
|
||||||
// consume more than maxSize on disk.
|
|
||||||
func newQueue(dir string, maxSize int64) (*queue, error) {
|
|
||||||
return &queue{
|
|
||||||
dir: dir,
|
|
||||||
maxSegmentSize: defaultSegmentSize,
|
|
||||||
maxSize: maxSize,
|
|
||||||
segments: segments{},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open opens the queue for reading and writing.
// It loads any existing segments from disk, creating one if the queue is
// empty, and sets the head/tail pointers.
func (l *queue) Open() error {
	l.mu.Lock()
	defer l.mu.Unlock()

	// Discover existing segment files on disk.
	segments, err := l.loadSegments()
	if err != nil {
		return err
	}
	l.segments = segments

	// An empty queue still needs one segment to write into.
	if len(l.segments) == 0 {
		_, err := l.addSegment()
		if err != nil {
			return err
		}
	}

	// Reads come from the first segment, writes go to the last.
	l.head = l.segments[0]
	l.tail = l.segments[len(l.segments)-1]

	// If the head has been fully advanced and the segment size is modified,
	// existing segments can get stuck and never allow clients to advance further.
	// This advances the segment if the current head is already at the end.
	_, err = l.head.current()
	if err == io.EOF {
		return l.trimHead()
	}

	return nil
}
|
|
||||||
|
|
||||||
// Close stops the queue for reading and writing
|
|
||||||
func (l *queue) Close() error {
|
|
||||||
l.mu.Lock()
|
|
||||||
defer l.mu.Unlock()
|
|
||||||
|
|
||||||
for _, s := range l.segments {
|
|
||||||
if err := s.close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
l.head = nil
|
|
||||||
l.tail = nil
|
|
||||||
l.segments = nil
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetMaxSegmentSize updates the max segment size for new and existing
|
|
||||||
// segments.
|
|
||||||
func (l *queue) SetMaxSegmentSize(size int64) error {
|
|
||||||
l.mu.Lock()
|
|
||||||
defer l.mu.Unlock()
|
|
||||||
|
|
||||||
l.maxSegmentSize = size
|
|
||||||
|
|
||||||
for _, s := range l.segments {
|
|
||||||
s.SetMaxSegmentSize(size)
|
|
||||||
}
|
|
||||||
|
|
||||||
if l.tail.diskUsage() >= l.maxSegmentSize {
|
|
||||||
segment, err := l.addSegment()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
l.tail = segment
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PurgeOlderThan removes fully-aged head segments whose last modification
// time is strictly before when (truncated to whole seconds).
func (l *queue) PurgeOlderThan(when time.Time) error {
	l.mu.Lock()
	defer l.mu.Unlock()

	// Add a new empty segment so old ones can be reclaimed
	if _, err := l.addSegment(); err != nil {
		return err
	}

	cutoff := when.Truncate(time.Second)
	for {
		mod, err := l.head.lastModified()
		if err != nil {
			return err
		}

		// Stop once the head is as new as (or newer than) the cutoff.
		// NOTE(review): loop termination relies on the segment just added
		// above having a mod time >= cutoff — confirm for cutoffs in the future.
		if mod.After(cutoff) || mod.Equal(cutoff) {
			return nil
		}
		// Drop the stale head segment and re-examine the next one.
		if err := l.trimHead(); err != nil {
			return err
		}
	}
}
// diskUsage returns the total size on disk used by the queue
|
|
||||||
func (l *queue) diskUsage() int64 {
|
|
||||||
var size int64
|
|
||||||
for _, s := range l.segments {
|
|
||||||
size += s.diskUsage()
|
|
||||||
}
|
|
||||||
return size
|
|
||||||
}
|
|
||||||
|
|
||||||
// addSegment creates a new empty segment file
|
|
||||||
func (l *queue) addSegment() (*segment, error) {
|
|
||||||
nextID, err := l.nextSegmentID()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
segment, err := newSegment(filepath.Join(l.dir, strconv.FormatUint(nextID, 10)), l.maxSegmentSize)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
l.segments = append(l.segments, segment)
|
|
||||||
return segment, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// loadSegments loads all segments on disk
|
|
||||||
func (l *queue) loadSegments() (segments, error) {
|
|
||||||
segments := []*segment{}
|
|
||||||
|
|
||||||
files, err := ioutil.ReadDir(l.dir)
|
|
||||||
if err != nil {
|
|
||||||
return segments, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, segment := range files {
|
|
||||||
// Segments should be files. Skip anything that is not a dir.
|
|
||||||
if segment.IsDir() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Segments file names are all numeric
|
|
||||||
_, err := strconv.ParseUint(segment.Name(), 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
segment, err := newSegment(filepath.Join(l.dir, segment.Name()), l.maxSegmentSize)
|
|
||||||
if err != nil {
|
|
||||||
return segments, err
|
|
||||||
}
|
|
||||||
|
|
||||||
segments = append(segments, segment)
|
|
||||||
}
|
|
||||||
return segments, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// nextSegmentID returns the next segment ID that is free
func (l *queue) nextSegmentID() (uint64, error) {
	segments, err := ioutil.ReadDir(l.dir)
	if err != nil {
		return 0, err
	}

	var maxID uint64
	for _, segment := range segments {
		// Segments should be files. Skip anything that is a dir.
		if segment.IsDir() {
			continue
		}

		// Segments file names are all numeric; ignore anything else.
		segmentID, err := strconv.ParseUint(segment.Name(), 10, 64)
		if err != nil {
			continue
		}

		if segmentID > maxID {
			maxID = segmentID
		}
	}

	// IDs start at 1 for an empty directory.
	return maxID + 1, nil
}
// Append appends a byte slice to the end of the queue
|
|
||||||
func (l *queue) Append(b []byte) error {
|
|
||||||
l.mu.Lock()
|
|
||||||
defer l.mu.Unlock()
|
|
||||||
|
|
||||||
if l.tail == nil {
|
|
||||||
return ErrNotOpen
|
|
||||||
}
|
|
||||||
|
|
||||||
if l.diskUsage()+int64(len(b)) > l.maxSize {
|
|
||||||
return ErrQueueFull
|
|
||||||
}
|
|
||||||
|
|
||||||
// Append the entry to the tail, if the segment is full,
|
|
||||||
// try to create new segment and retry the append
|
|
||||||
if err := l.tail.append(b); err == ErrSegmentFull {
|
|
||||||
segment, err := l.addSegment()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
l.tail = segment
|
|
||||||
return l.tail.append(b)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Current returns the current byte slice at the head of the queue
|
|
||||||
func (l *queue) Current() ([]byte, error) {
|
|
||||||
if l.head == nil {
|
|
||||||
return nil, ErrNotOpen
|
|
||||||
}
|
|
||||||
|
|
||||||
return l.head.current()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Advance moves the head point to the next byte slice in the queue
|
|
||||||
func (l *queue) Advance() error {
|
|
||||||
l.mu.Lock()
|
|
||||||
defer l.mu.Unlock()
|
|
||||||
if l.head == nil {
|
|
||||||
return ErrNotOpen
|
|
||||||
}
|
|
||||||
|
|
||||||
err := l.head.advance()
|
|
||||||
if err == io.EOF {
|
|
||||||
if err := l.trimHead(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// trimHead removes the fully-consumed head segment's file from disk and
// promotes the next segment to head. The last remaining segment is never
// removed.
func (l *queue) trimHead() error {
	if len(l.segments) > 1 {
		l.segments = l.segments[1:]

		if err := l.head.close(); err != nil {
			return err
		}
		// Delete the old head's backing file.
		if err := os.Remove(l.head.path); err != nil {
			return err
		}
		l.head = l.segments[0]
	}
	return nil
}
// Segment is a queue using a single file. The structure of a segment is a series
// lengths + block with a single footer point to the position in the segment of the
// current head block.
//
// ┌──────────────────────────┐ ┌──────────────────────────┐ ┌────────────┐
// │         Block 1          │ │         Block 2          │ │   Footer   │
// └──────────────────────────┘ └──────────────────────────┘ └────────────┘
// ┌────────────┐┌────────────┐ ┌────────────┐┌────────────┐ ┌────────────┐
// │Block 1 Len ││Block 1 Body│ │Block 2 Len ││Block 2 Body│ │Head Offset │
// │  8 bytes   ││  N bytes   │ │  8 bytes   ││  N bytes   │ │  8 bytes   │
// └────────────┘└────────────┘ └────────────┘└────────────┘ └────────────┘
//
// The footer holds the pointer to the head entry at the end of the segment to allow writes
// to seek to the end and write sequentially (vs having to seek back to the beginning of
// the segment to update the head pointer). Reads must seek to the end then back into the
// segment offset stored in the footer.
//
// Segments store arbitrary byte slices and leave the serialization to the caller. Segments
// are created with a max size and will block writes when the segment is full.
type segment struct {
	// mu guards all fields below.
	mu sync.RWMutex

	size int64    // current on-disk size of the segment file, in bytes
	file *os.File // open handle to the segment file; nil once closed
	path string   // filesystem path of the segment file

	pos         int64 // offset of the current head record within the file
	currentSize int64 // body length of the current head record (0 at end of segment)
	maxSize     int64 // maximum allowed size of the segment file
}
func newSegment(path string, maxSize int64) (*segment, error) {
|
|
||||||
f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0600)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
stats, err := os.Stat(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
s := &segment{file: f, path: path, size: stats.Size(), maxSize: maxSize}
|
|
||||||
|
|
||||||
if err := s.open(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// open initializes the segment's head pointer (pos) and the current
// record's size from the file, writing an initial footer if the file is
// brand new.
func (l *segment) open() error {
	l.mu.Lock()
	defer l.mu.Unlock()

	// If it's a new segment then write the location of the current record in this segment
	if l.size == 0 {
		l.pos = 0
		l.currentSize = 0

		if err := l.writeUint64(uint64(l.pos)); err != nil {
			return err
		}

		if err := l.file.Sync(); err != nil {
			return err
		}

		// A fresh segment contains only the footer.
		l.size = footerSize

		return nil
	}

	// Existing segment so read the current position and the size of the current block
	if err := l.seekEnd(-footerSize); err != nil {
		return err
	}

	pos, err := l.readUint64()
	if err != nil {
		return err
	}
	l.pos = int64(pos)

	if err := l.seekToCurrent(); err != nil {
		return err
	}

	// If we're at the end of the segment, don't read the current block size,
	// it's 0.
	if l.pos < l.size-footerSize {
		currentSize, err := l.readUint64()
		if err != nil {
			return err
		}
		l.currentSize = int64(currentSize)
	}

	return nil
}
// append adds byte slice to the end of segment
func (l *segment) append(b []byte) error {
	l.mu.Lock()
	defer l.mu.Unlock()

	if l.file == nil {
		return ErrNotOpen
	}

	// Reject writes that would push the segment past its size limit.
	if l.size+int64(len(b)) > l.maxSize {
		return ErrSegmentFull
	}

	// Overwrite the old footer with: record length, record body, new footer.
	if err := l.seekEnd(-footerSize); err != nil {
		return err
	}

	if err := l.writeUint64(uint64(len(b))); err != nil {
		return err
	}

	if err := l.writeBytes(b); err != nil {
		return err
	}

	// Re-write the footer (head offset) after the new record.
	if err := l.writeUint64(uint64(l.pos)); err != nil {
		return err
	}

	if err := l.file.Sync(); err != nil {
		return err
	}

	// First record written becomes the current record.
	if l.currentSize == 0 {
		l.currentSize = int64(len(b))
	}

	l.size += int64(len(b)) + 8 // uint64 for slice length

	return nil
}
// current returns byte slice that the current segment points
|
|
||||||
func (l *segment) current() ([]byte, error) {
|
|
||||||
l.mu.Lock()
|
|
||||||
defer l.mu.Unlock()
|
|
||||||
|
|
||||||
if int64(l.pos) == l.size-8 {
|
|
||||||
return nil, io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := l.seekToCurrent(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// read the record size
|
|
||||||
sz, err := l.readUint64()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
l.currentSize = int64(sz)
|
|
||||||
|
|
||||||
if int64(sz) > l.maxSize {
|
|
||||||
return nil, fmt.Errorf("record size out of range: max %d: got %d", l.maxSize, sz)
|
|
||||||
}
|
|
||||||
|
|
||||||
b := make([]byte, sz)
|
|
||||||
if err := l.readBytes(b); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return b, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// advance advances the current value pointer
func (l *segment) advance() error {
	l.mu.Lock()
	defer l.mu.Unlock()

	if l.file == nil {
		return ErrNotOpen
	}

	// If we're at the end of the file, can't advance
	if int64(l.pos) == l.size-footerSize {
		l.currentSize = 0
		return io.EOF
	}

	// Persist the new head offset into the footer first.
	if err := l.seekEnd(-footerSize); err != nil {
		return err
	}

	// New head position: skip the current record's 8-byte length prefix
	// plus its body.
	pos := l.pos + l.currentSize + 8
	if err := l.writeUint64(uint64(pos)); err != nil {
		return err
	}

	if err := l.file.Sync(); err != nil {
		return err
	}
	l.pos = pos

	if err := l.seekToCurrent(); err != nil {
		return err
	}

	// Cache the next record's length so current() is cheap.
	sz, err := l.readUint64()
	if err != nil {
		return err
	}
	l.currentSize = int64(sz)

	// Advanced onto the footer: segment is now exhausted.
	if int64(l.pos) == l.size-footerSize {
		l.currentSize = 0
		return io.EOF
	}

	return nil
}
func (l *segment) close() error {
|
|
||||||
l.mu.Lock()
|
|
||||||
defer l.mu.Unlock()
|
|
||||||
if err := l.file.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
l.file = nil
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *segment) lastModified() (time.Time, error) {
|
|
||||||
l.mu.RLock()
|
|
||||||
defer l.mu.RUnlock()
|
|
||||||
|
|
||||||
stats, err := os.Stat(l.file.Name())
|
|
||||||
if err != nil {
|
|
||||||
return time.Time{}, err
|
|
||||||
}
|
|
||||||
return stats.ModTime(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *segment) diskUsage() int64 {
|
|
||||||
l.mu.RLock()
|
|
||||||
defer l.mu.RUnlock()
|
|
||||||
return l.size
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetMaxSegmentSize updates the maximum allowed size of this segment file.
// Note this assigns the per-segment maxSize field, distinct from the
// queue-level maxSize.
func (l *segment) SetMaxSegmentSize(size int64) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.maxSize = size
}
func (l *segment) seekToCurrent() error {
|
|
||||||
return l.seek(int64(l.pos))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *segment) seek(pos int64) error {
|
|
||||||
n, err := l.file.Seek(pos, os.SEEK_SET)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if n != pos {
|
|
||||||
return fmt.Errorf("bad seek. exp %v, got %v", 0, n)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *segment) seekEnd(pos int64) error {
|
|
||||||
_, err := l.file.Seek(pos, os.SEEK_END)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// filePos returns the file's current offset.
// NOTE(review): the Seek error is deliberately ignored here; a failed seek
// reports offset 0.
func (l *segment) filePos() int64 {
	n, _ := l.file.Seek(0, os.SEEK_CUR)
	return n
}
func (l *segment) readUint64() (uint64, error) {
|
|
||||||
b := make([]byte, 8)
|
|
||||||
if err := l.readBytes(b); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
return btou64(b), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeUint64 writes sz as a big-endian uint64 at the file's current offset.
func (l *segment) writeUint64(sz uint64) error {
	return l.writeBytes(u64tob(sz))
}
func (l *segment) writeBytes(b []byte) error {
|
|
||||||
n, err := l.file.Write(b)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if n != len(b) {
|
|
||||||
return fmt.Errorf("short write. got %d, exp %d", n, len(b))
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *segment) readBytes(b []byte) error {
|
|
||||||
n, err := l.file.Read(b)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if n != len(b) {
|
|
||||||
return fmt.Errorf("bad read. exp %v, got %v", 0, n)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// u64tob encodes v as an 8-byte big-endian slice.
func u64tob(v uint64) []byte {
	var b [8]byte
	binary.BigEndian.PutUint64(b[:], v)
	return b[:]
}
// btou64 decodes the first 8 bytes of b as a big-endian uint64.
// Panics (index out of range) if len(b) < 8, matching binary.BigEndian.Uint64.
func btou64(b []byte) uint64 {
	return uint64(b[0])<<56 | uint64(b[1])<<48 | uint64(b[2])<<40 | uint64(b[3])<<32 |
		uint64(b[4])<<24 | uint64(b[5])<<16 | uint64(b[6])<<8 | uint64(b[7])
}
@ -1,327 +0,0 @@
|
||||||
package hh
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func BenchmarkQueueAppend(b *testing.B) {
|
|
||||||
dir, err := ioutil.TempDir("", "hh_queue")
|
|
||||||
if err != nil {
|
|
||||||
b.Fatalf("failed to create temp dir: %v", err)
|
|
||||||
}
|
|
||||||
defer os.RemoveAll(dir)
|
|
||||||
|
|
||||||
q, err := newQueue(dir, 1024*1024*1024)
|
|
||||||
if err != nil {
|
|
||||||
b.Fatalf("failed to create queue: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := q.Open(); err != nil {
|
|
||||||
b.Fatalf("failed to open queue: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
if err := q.Append([]byte(fmt.Sprintf("%d", i))); err != nil {
|
|
||||||
println(q.diskUsage())
|
|
||||||
b.Fatalf("Queue.Append failed: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestQueueAppendOne(t *testing.T) {
|
|
||||||
dir, err := ioutil.TempDir("", "hh_queue")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to create temp dir: %v", err)
|
|
||||||
}
|
|
||||||
defer os.RemoveAll(dir)
|
|
||||||
|
|
||||||
q, err := newQueue(dir, 1024)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to create queue: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := q.Open(); err != nil {
|
|
||||||
t.Fatalf("failed to open queue: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := q.Append([]byte("test")); err != nil {
|
|
||||||
t.Fatalf("Queue.Append failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
exp := filepath.Join(dir, "1")
|
|
||||||
stats, err := os.Stat(exp)
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
t.Fatalf("Queue.Append file not exists. exp %v to exist", exp)
|
|
||||||
}
|
|
||||||
|
|
||||||
// 8 byte header ptr + 8 byte record len + record len
|
|
||||||
if exp := int64(8 + 8 + 4); stats.Size() != exp {
|
|
||||||
t.Fatalf("Queue.Append file size mismatch. got %v, exp %v", stats.Size(), exp)
|
|
||||||
}
|
|
||||||
|
|
||||||
cur, err := q.Current()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Queue.Current failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if exp := "test"; string(cur) != exp {
|
|
||||||
t.Errorf("Queue.Current mismatch: got %v, exp %v", string(cur), exp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestQueueAppendMultiple(t *testing.T) {
|
|
||||||
dir, err := ioutil.TempDir("", "hh_queue")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to create temp dir: %v", err)
|
|
||||||
}
|
|
||||||
defer os.RemoveAll(dir)
|
|
||||||
|
|
||||||
q, err := newQueue(dir, 1024)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to create queue: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := q.Open(); err != nil {
|
|
||||||
t.Fatalf("failed to open queue: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := q.Append([]byte("one")); err != nil {
|
|
||||||
t.Fatalf("Queue.Append failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := q.Append([]byte("two")); err != nil {
|
|
||||||
t.Fatalf("Queue.Append failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, exp := range []string{"one", "two"} {
|
|
||||||
cur, err := q.Current()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Queue.Current failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if string(cur) != exp {
|
|
||||||
t.Errorf("Queue.Current mismatch: got %v, exp %v", string(cur), exp)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := q.Advance(); err != nil {
|
|
||||||
t.Fatalf("Queue.Advance failed: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestQueueAdvancePastEnd verifies that advancing past the end of a head
// segment removes its file from disk, moves reads to the next segment, and
// that advancing past the final entry yields io.EOF from Current.
func TestQueueAdvancePastEnd(t *testing.T) {
	dir, err := ioutil.TempDir("", "hh_queue")
	if err != nil {
		t.Fatalf("failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)

	// create the queue
	q, err := newQueue(dir, 1024)
	if err != nil {
		t.Fatalf("failed to create queue: %v", err)
	}

	if err := q.Open(); err != nil {
		t.Fatalf("failed to open queue: %v", err)
	}

	// append one entry, should go to the first segment
	if err := q.Append([]byte("one")); err != nil {
		t.Fatalf("Queue.Append failed: %v", err)
	}

	// set the segment size low to force a new segment to be created
	q.SetMaxSegmentSize(12)

	// Should go into a new segment
	if err := q.Append([]byte("two")); err != nil {
		t.Fatalf("Queue.Append failed: %v", err)
	}

	// should read from first segment
	cur, err := q.Current()
	if err != nil {
		t.Fatalf("Queue.Current failed: %v", err)
	}

	if exp := "one"; string(cur) != exp {
		t.Errorf("Queue.Current mismatch: got %v, exp %v", string(cur), exp)
	}

	if err := q.Advance(); err != nil {
		t.Fatalf("Queue.Advance failed: %v", err)
	}

	// ensure the first segment file is removed since we've advanced past the end
	_, err = os.Stat(filepath.Join(dir, "1"))
	if !os.IsNotExist(err) {
		t.Fatalf("Queue.Advance should have removed the segment")
	}

	// should read from second segment
	cur, err = q.Current()
	if err != nil {
		t.Fatalf("Queue.Current failed: %v", err)
	}

	if exp := "two"; string(cur) != exp {
		t.Errorf("Queue.Current mismatch: got %v, exp %v", string(cur), exp)
	}

	// The second (last) segment must never be removed.
	_, err = os.Stat(filepath.Join(dir, "2"))
	if os.IsNotExist(err) {
		t.Fatalf("Queue.Advance should have removed the segment")
	}

	if err := q.Advance(); err != nil {
		t.Fatalf("Queue.Advance failed: %v", err)
	}

	// Past the last entry, Current reports io.EOF.
	cur, err = q.Current()
	if err != io.EOF {
		t.Fatalf("Queue.Current should have returned error")
	}
}
func TestQueueFull(t *testing.T) {
|
|
||||||
dir, err := ioutil.TempDir("", "hh_queue")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to create temp dir: %v", err)
|
|
||||||
}
|
|
||||||
defer os.RemoveAll(dir)
|
|
||||||
|
|
||||||
// create the queue
|
|
||||||
q, err := newQueue(dir, 10)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("failed to create queue: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := q.Open(); err != nil {
|
|
||||||
t.Fatalf("failed to open queue: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := q.Append([]byte("one")); err != ErrQueueFull {
|
|
||||||
t.Fatalf("Queue.Append expected to return queue full")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestQueueReopen verifies that the head position and queued data survive a
// Close/Open cycle, and that the reopened queue accepts further appends and
// advances correctly.
func TestQueueReopen(t *testing.T) {
	dir, err := ioutil.TempDir("", "hh_queue")
	if err != nil {
		t.Fatalf("failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)

	// create the queue
	q, err := newQueue(dir, 1024)
	if err != nil {
		t.Fatalf("failed to create queue: %v", err)
	}

	if err := q.Open(); err != nil {
		t.Fatalf("failed to open queue: %v", err)
	}

	if err := q.Append([]byte("one")); err != nil {
		t.Fatalf("Queue.Append failed: %v", err)
	}

	cur, err := q.Current()
	if err != nil {
		t.Fatalf("Queue.Current failed: %v", err)
	}

	if exp := "one"; string(cur) != exp {
		t.Errorf("Queue.Current mismatch: got %v, exp %v", string(cur), exp)
	}

	// close and re-open the queue
	if err := q.Close(); err != nil {
		t.Fatalf("Queue.Close failed: %v", err)
	}

	if err := q.Open(); err != nil {
		t.Fatalf("failed to re-open queue: %v", err)
	}

	// Make sure we can read back the last current value
	cur, err = q.Current()
	if err != nil {
		t.Fatalf("Queue.Current failed: %v", err)
	}

	if exp := "one"; string(cur) != exp {
		t.Errorf("Queue.Current mismatch: got %v, exp %v", string(cur), exp)
	}

	// The reopened queue still accepts writes and advances.
	if err := q.Append([]byte("two")); err != nil {
		t.Fatalf("Queue.Append failed: %v", err)
	}

	if err := q.Advance(); err != nil {
		t.Fatalf("Queue.Advance failed: %v", err)
	}

	cur, err = q.Current()
	if err != nil {
		t.Fatalf("Queue.Current failed: %v", err)
	}

	if exp := "two"; string(cur) != exp {
		t.Errorf("Queue.Current mismatch: got %v, exp %v", string(cur), exp)
	}
}
// TestPurgeQueue verifies that PurgeOlderThan drops entries written before
// the cutoff, leaving the queue empty (Current reports io.EOF). The test
// sleeps one second to make the entry older than the cutoff, so it is
// skipped under -short.
func TestPurgeQueue(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping purge queue")
	}

	dir, err := ioutil.TempDir("", "hh_queue")
	if err != nil {
		t.Fatalf("failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)

	// create the queue
	q, err := newQueue(dir, 1024)
	if err != nil {
		t.Fatalf("failed to create queue: %v", err)
	}

	if err := q.Open(); err != nil {
		t.Fatalf("failed to open queue: %v", err)
	}

	if err := q.Append([]byte("one")); err != nil {
		t.Fatalf("Queue.Append failed: %v", err)
	}

	cur, err := q.Current()
	if err != nil {
		t.Fatalf("Queue.Current failed: %v", err)
	}

	if exp := "one"; string(cur) != exp {
		t.Errorf("Queue.Current mismatch: got %v, exp %v", string(cur), exp)
	}

	// Age the entry past the purge cutoff (mod times have 1s resolution).
	time.Sleep(time.Second)

	if err := q.PurgeOlderThan(time.Now()); err != nil {
		t.Errorf("Queue.PurgeOlderThan failed: %v", err)
	}

	_, err = q.Current()
	if err != io.EOF {
		t.Fatalf("Queue.Current expected io.EOF, got: %v", err)
	}

}
@ -1,136 +0,0 @@
|
||||||
package hh
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ErrHintedHandoffDisabled is returned by WriteShard when hinted handoff is
// disabled by configuration.
var ErrHintedHandoffDisabled = fmt.Errorf("hinted handoff disabled")

// Service manages hinted handoff: shard writes that cannot be delivered to
// a remote node are queued and retried/expired by background goroutines.
type Service struct {
	mu      sync.RWMutex
	wg      sync.WaitGroup // tracks the retry and expire goroutines
	closing chan struct{}  // closed to signal background goroutines to exit

	Logger *log.Logger
	cfg    Config

	// ShardWriter performs the actual remote shard writes.
	ShardWriter shardWriter

	// HintedHandoff is the queue processor backing this service.
	HintedHandoff interface {
		WriteShard(shardID, ownerID uint64, points []tsdb.Point) error
		Process() error
		PurgeOlderThan(when time.Duration) error
	}
}
// shardWriter is the subset of the cluster writer used to deliver queued
// shard writes to their owner nodes.
type shardWriter interface {
	WriteShard(shardID, ownerID uint64, points []tsdb.Point) error
}
// NewService returns a new instance of Service.
func NewService(c Config, w shardWriter) *Service {
	s := &Service{
		cfg:    c,
		Logger: log.New(os.Stderr, "[handoff] ", log.LstdFlags),
	}
	processor, err := NewProcessor(c.Dir, w, ProcessorOptions{
		MaxSize:        c.MaxSize,
		RetryRateLimit: c.RetryRateLimit,
	})
	if err != nil {
		// NOTE(review): Fatalf exits the whole process from a constructor;
		// returning an error would be gentler but changes the interface.
		s.Logger.Fatalf("Failed to start hinted handoff processor: %v", err)
	}

	processor.Logger = s.Logger
	s.HintedHandoff = processor
	return s
}
// Open starts the hinted handoff service and its two background goroutines
// (write retry and write expiration).
func (s *Service) Open() error {
	s.Logger.Printf("Starting hinted handoff service")

	s.mu.Lock()
	defer s.mu.Unlock()

	// Fresh close-signal channel for this open/close cycle.
	s.closing = make(chan struct{})

	s.Logger.Printf("Using data dir: %v", s.cfg.Dir)

	// One WaitGroup slot per goroutine; Close waits on both.
	s.wg.Add(2)
	go s.retryWrites()
	go s.expireWrites()

	return nil
}
func (s *Service) Close() error {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
|
|
||||||
if s.closing != nil {
|
|
||||||
close(s.closing)
|
|
||||||
}
|
|
||||||
s.wg.Wait()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetLogger sets the internal logger to the logger passed in.
// NOTE(review): Logger is assigned without holding s.mu; call before Open
// to avoid racing the background goroutines that read it — confirm callers.
func (s *Service) SetLogger(l *log.Logger) {
	s.Logger = l
}
// WriteShard queues the points write for shardID to node ownerID to handoff queue
|
|
||||||
func (s *Service) WriteShard(shardID, ownerID uint64, points []tsdb.Point) error {
|
|
||||||
if !s.cfg.Enabled {
|
|
||||||
return ErrHintedHandoffDisabled
|
|
||||||
}
|
|
||||||
|
|
||||||
return s.HintedHandoff.WriteShard(shardID, ownerID, points)
|
|
||||||
}
|
|
||||||
|
|
||||||
// retryWrites periodically re-attempts delivery of queued writes until the
// service is closed.
func (s *Service) retryWrites() {
	defer s.wg.Done()
	ticker := time.NewTicker(time.Duration(s.cfg.RetryInterval))
	defer ticker.Stop()
	for {
		select {
		case <-s.closing:
			return
		case <-ticker.C:
			// io.EOF just means the queues are drained; not a failure.
			if err := s.HintedHandoff.Process(); err != nil && err != io.EOF {
				s.Logger.Printf("retried write failed: %v", err)
			}
		}
	}
}
// expireWrites will cause the handoff queues to remove writes that are older
// than the configured threshold
func (s *Service) expireWrites() {
	defer s.wg.Done()
	// Expiration is checked hourly until the service is closed.
	ticker := time.NewTicker(time.Hour)
	defer ticker.Stop()
	for {
		select {
		case <-s.closing:
			return
		case <-ticker.C:
			if err := s.HintedHandoff.PurgeOlderThan(time.Duration(s.cfg.MaxAge)); err != nil {
				s.Logger.Printf("purge write failed: %v", err)
			}
		}
	}
}
// purgeWrites will cause the handoff queues to remove writes that are no longer
// valid. e.g. queued writes for a node that has been removed
//
// NOTE(review): not yet implemented; any caller will panic.
func (s *Service) purgeWrites() {
	panic("not implemented")
}
|
@ -1,22 +0,0 @@
|
||||||
package httpd
|
|
||||||
|
|
||||||
// Config represents the configuration for the HTTP API service.
// NOTE(review): HttpsEnabled/HttpsCertificate don't follow Go initialism
// style (HTTPS...), but renaming exported fields would break callers.
type Config struct {
	Enabled          bool   `toml:"enabled"`           // serve the HTTP API at all
	BindAddress      string `toml:"bind-address"`      // host:port to listen on
	AuthEnabled      bool   `toml:"auth-enabled"`      // require authentication
	LogEnabled       bool   `toml:"log-enabled"`       // log HTTP requests
	WriteTracing     bool   `toml:"write-tracing"`     // verbose tracing of writes
	PprofEnabled     bool   `toml:"pprof-enabled"`     // expose profiling endpoints
	HttpsEnabled     bool   `toml:"https-enabled"`     // serve over TLS
	HttpsCertificate string `toml:"https-certificate"` // path to the TLS certificate file
}
func NewConfig() Config {
|
|
||||||
return Config{
|
|
||||||
Enabled: true,
|
|
||||||
BindAddress: ":8086",
|
|
||||||
LogEnabled: true,
|
|
||||||
HttpsEnabled: false,
|
|
||||||
HttpsCertificate: "/etc/ssl/influxdb.pem",
|
|
||||||
}
|
|
||||||
}
|
|
52
Godeps/_workspace/src/github.com/influxdb/influxdb/services/httpd/config_test.go
generated
vendored
52
Godeps/_workspace/src/github.com/influxdb/influxdb/services/httpd/config_test.go
generated
vendored
|
@ -1,52 +0,0 @@
|
||||||
package httpd_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/BurntSushi/toml"
|
|
||||||
"github.com/influxdb/influxdb/services/httpd"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestConfig_Parse(t *testing.T) {
|
|
||||||
// Parse configuration.
|
|
||||||
var c httpd.Config
|
|
||||||
if _, err := toml.Decode(`
|
|
||||||
enabled = true
|
|
||||||
bind-address = ":8080"
|
|
||||||
auth-enabled = true
|
|
||||||
log-enabled = true
|
|
||||||
write-tracing = true
|
|
||||||
pprof-enabled = true
|
|
||||||
https-enabled = true
|
|
||||||
https-certificate = "/dev/null"
|
|
||||||
`, &c); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate configuration.
|
|
||||||
if c.Enabled != true {
|
|
||||||
t.Fatalf("unexpected enabled: %v", c.Enabled)
|
|
||||||
} else if c.BindAddress != ":8080" {
|
|
||||||
t.Fatalf("unexpected bind address: %s", c.BindAddress)
|
|
||||||
} else if c.AuthEnabled != true {
|
|
||||||
t.Fatalf("unexpected auth enabled: %v", c.AuthEnabled)
|
|
||||||
} else if c.LogEnabled != true {
|
|
||||||
t.Fatalf("unexpected log enabled: %v", c.LogEnabled)
|
|
||||||
} else if c.WriteTracing != true {
|
|
||||||
t.Fatalf("unexpected write tracing: %v", c.WriteTracing)
|
|
||||||
} else if c.PprofEnabled != true {
|
|
||||||
t.Fatalf("unexpected pprof enabled: %v", c.PprofEnabled)
|
|
||||||
} else if c.HttpsEnabled != true {
|
|
||||||
t.Fatalf("unexpected https enabled: %v", c.HttpsEnabled)
|
|
||||||
} else if c.HttpsCertificate != "/dev/null" {
|
|
||||||
t.Fatalf("unexpected https certificate: %v", c.HttpsCertificate)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfig_WriteTracing(t *testing.T) {
|
|
||||||
c := httpd.Config{WriteTracing: true}
|
|
||||||
s := httpd.NewService(c)
|
|
||||||
if !s.Handler.WriteTrace {
|
|
||||||
t.Fatalf("write tracing was not set")
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,853 +0,0 @@
|
||||||
package httpd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"compress/gzip"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
"net/http/pprof"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/bmizerany/pat"
|
|
||||||
"github.com/influxdb/influxdb"
|
|
||||||
"github.com/influxdb/influxdb/client"
|
|
||||||
"github.com/influxdb/influxdb/cluster"
|
|
||||||
"github.com/influxdb/influxdb/influxql"
|
|
||||||
"github.com/influxdb/influxdb/meta"
|
|
||||||
"github.com/influxdb/influxdb/services/continuous_querier"
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
"github.com/influxdb/influxdb/uuid"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// With raw data queries, mappers will read up to this amount before sending results back to the engine.
|
|
||||||
// This is the default size in the number of values returned in a raw query. Could be many more bytes depending on fields returned.
|
|
||||||
DefaultChunkSize = 10000
|
|
||||||
)
|
|
||||||
|
|
||||||
// TODO: Standard response headers (see: HeaderHandler)
|
|
||||||
// TODO: Compression (see: CompressionHeaderHandler)
|
|
||||||
|
|
||||||
// TODO: Check HTTP response codes: 400, 401, 403, 409.
|
|
||||||
|
|
||||||
type route struct {
|
|
||||||
name string
|
|
||||||
method string
|
|
||||||
pattern string
|
|
||||||
gzipped bool
|
|
||||||
log bool
|
|
||||||
handlerFunc interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handler represents an HTTP handler for the InfluxDB server.
|
|
||||||
type Handler struct {
|
|
||||||
mux *pat.PatternServeMux
|
|
||||||
requireAuthentication bool
|
|
||||||
Version string
|
|
||||||
|
|
||||||
MetaStore interface {
|
|
||||||
Database(name string) (*meta.DatabaseInfo, error)
|
|
||||||
Authenticate(username, password string) (ui *meta.UserInfo, err error)
|
|
||||||
Users() ([]meta.UserInfo, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
QueryExecutor interface {
|
|
||||||
Authorize(u *meta.UserInfo, q *influxql.Query, db string) error
|
|
||||||
ExecuteQuery(q *influxql.Query, db string, chunkSize int) (<-chan *influxql.Result, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
PointsWriter interface {
|
|
||||||
WritePoints(p *cluster.WritePointsRequest) error
|
|
||||||
}
|
|
||||||
|
|
||||||
ContinuousQuerier continuous_querier.ContinuousQuerier
|
|
||||||
|
|
||||||
Logger *log.Logger
|
|
||||||
loggingEnabled bool // Log every HTTP access.
|
|
||||||
WriteTrace bool // Detailed logging of write path
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewHandler returns a new instance of handler with routes.
|
|
||||||
func NewHandler(requireAuthentication, loggingEnabled, writeTrace bool) *Handler {
|
|
||||||
h := &Handler{
|
|
||||||
mux: pat.New(),
|
|
||||||
requireAuthentication: requireAuthentication,
|
|
||||||
Logger: log.New(os.Stderr, "[http] ", log.LstdFlags),
|
|
||||||
loggingEnabled: loggingEnabled,
|
|
||||||
WriteTrace: writeTrace,
|
|
||||||
}
|
|
||||||
|
|
||||||
h.SetRoutes([]route{
|
|
||||||
route{
|
|
||||||
"query", // Satisfy CORS checks.
|
|
||||||
"OPTIONS", "/query", true, true, h.serveOptions,
|
|
||||||
},
|
|
||||||
route{
|
|
||||||
"query", // Query serving route.
|
|
||||||
"GET", "/query", true, true, h.serveQuery,
|
|
||||||
},
|
|
||||||
route{
|
|
||||||
"write", // Satisfy CORS checks.
|
|
||||||
"OPTIONS", "/write", true, true, h.serveOptions,
|
|
||||||
},
|
|
||||||
route{
|
|
||||||
"write", // Data-ingest route.
|
|
||||||
"POST", "/write", true, true, h.serveWrite,
|
|
||||||
},
|
|
||||||
route{ // Ping
|
|
||||||
"ping",
|
|
||||||
"GET", "/ping", true, true, h.servePing,
|
|
||||||
},
|
|
||||||
route{ // Ping
|
|
||||||
"ping-head",
|
|
||||||
"HEAD", "/ping", true, true, h.servePing,
|
|
||||||
},
|
|
||||||
route{ // Tell data node to run CQs that should be run
|
|
||||||
"process_continuous_queries",
|
|
||||||
"POST", "/data/process_continuous_queries", false, false, h.serveProcessContinuousQueries,
|
|
||||||
},
|
|
||||||
})
|
|
||||||
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *Handler) SetRoutes(routes []route) {
|
|
||||||
for _, r := range routes {
|
|
||||||
var handler http.Handler
|
|
||||||
|
|
||||||
// If it's a handler func that requires authorization, wrap it in authorization
|
|
||||||
if hf, ok := r.handlerFunc.(func(http.ResponseWriter, *http.Request, *meta.UserInfo)); ok {
|
|
||||||
handler = authenticate(hf, h, h.requireAuthentication)
|
|
||||||
}
|
|
||||||
// This is a normal handler signature and does not require authorization
|
|
||||||
if hf, ok := r.handlerFunc.(func(http.ResponseWriter, *http.Request)); ok {
|
|
||||||
handler = http.HandlerFunc(hf)
|
|
||||||
}
|
|
||||||
|
|
||||||
if r.gzipped {
|
|
||||||
handler = gzipFilter(handler)
|
|
||||||
}
|
|
||||||
handler = versionHeader(handler, h)
|
|
||||||
handler = cors(handler)
|
|
||||||
handler = requestID(handler)
|
|
||||||
if h.loggingEnabled && r.log {
|
|
||||||
handler = logging(handler, r.name, h.Logger)
|
|
||||||
}
|
|
||||||
handler = recovery(handler, r.name, h.Logger) // make sure recovery is always last
|
|
||||||
|
|
||||||
h.mux.Add(r.method, r.pattern, handler)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServeHTTP responds to HTTP request to the handler.
|
|
||||||
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|
||||||
// FIXME(benbjohnson): Add pprof enabled flag.
|
|
||||||
if strings.HasPrefix(r.URL.Path, "/debug/pprof") {
|
|
||||||
switch r.URL.Path {
|
|
||||||
case "/debug/pprof/cmdline":
|
|
||||||
pprof.Cmdline(w, r)
|
|
||||||
case "/debug/pprof/profile":
|
|
||||||
pprof.Profile(w, r)
|
|
||||||
case "/debug/pprof/symbol":
|
|
||||||
pprof.Symbol(w, r)
|
|
||||||
default:
|
|
||||||
pprof.Index(w, r)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
h.mux.ServeHTTP(w, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *Handler) serveProcessContinuousQueries(w http.ResponseWriter, r *http.Request, user *meta.UserInfo) {
|
|
||||||
// If the continuous query service isn't configured, return 404.
|
|
||||||
if h.ContinuousQuerier == nil {
|
|
||||||
w.WriteHeader(http.StatusNotImplemented)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
q := r.URL.Query()
|
|
||||||
|
|
||||||
// Get the database name (blank means all databases).
|
|
||||||
db := q.Get("db")
|
|
||||||
// Get the name of the CQ to run (blank means run all).
|
|
||||||
name := q.Get("name")
|
|
||||||
|
|
||||||
// Pass the request to the CQ service.
|
|
||||||
if err := h.ContinuousQuerier.Run(db, name); err != nil {
|
|
||||||
w.WriteHeader(http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
w.WriteHeader(http.StatusNoContent)
|
|
||||||
}
|
|
||||||
|
|
||||||
// serveQuery parses an incoming query and, if valid, executes the query.
|
|
||||||
func (h *Handler) serveQuery(w http.ResponseWriter, r *http.Request, user *meta.UserInfo) {
|
|
||||||
q := r.URL.Query()
|
|
||||||
pretty := q.Get("pretty") == "true"
|
|
||||||
|
|
||||||
qp := strings.TrimSpace(q.Get("q"))
|
|
||||||
if qp == "" {
|
|
||||||
httpError(w, `missing required parameter "q"`, pretty, http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
epoch := strings.TrimSpace(q.Get("epoch"))
|
|
||||||
|
|
||||||
p := influxql.NewParser(strings.NewReader(qp))
|
|
||||||
db := q.Get("db")
|
|
||||||
|
|
||||||
// Parse query from query string.
|
|
||||||
query, err := p.ParseQuery()
|
|
||||||
if err != nil {
|
|
||||||
httpError(w, "error parsing query: "+err.Error(), pretty, http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sanitize statements with passwords.
|
|
||||||
for _, s := range query.Statements {
|
|
||||||
switch stmt := s.(type) {
|
|
||||||
case *influxql.CreateUserStatement:
|
|
||||||
sanitize(r, stmt.Password)
|
|
||||||
case *influxql.SetPasswordUserStatement:
|
|
||||||
sanitize(r, stmt.Password)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check authorization.
|
|
||||||
if h.requireAuthentication {
|
|
||||||
err = h.QueryExecutor.Authorize(user, query, db)
|
|
||||||
if err != nil {
|
|
||||||
httpError(w, "error authorizing query: "+err.Error(), pretty, http.StatusUnauthorized)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse chunk size. Use default if not provided or unparsable.
|
|
||||||
chunked := (q.Get("chunked") == "true")
|
|
||||||
chunkSize := DefaultChunkSize
|
|
||||||
if chunked {
|
|
||||||
if n, err := strconv.ParseInt(q.Get("chunk_size"), 10, 64); err == nil {
|
|
||||||
chunkSize = int(n)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Execute query.
|
|
||||||
w.Header().Add("content-type", "application/json")
|
|
||||||
results, err := h.QueryExecutor.ExecuteQuery(query, db, chunkSize)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
w.WriteHeader(http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// if we're not chunking, this will be the in memory buffer for all results before sending to client
|
|
||||||
resp := Response{Results: make([]*influxql.Result, 0)}
|
|
||||||
|
|
||||||
// Status header is OK once this point is reached.
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
|
|
||||||
// pull all results from the channel
|
|
||||||
for r := range results {
|
|
||||||
// Ignore nil results.
|
|
||||||
if r == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// if requested, convert result timestamps to epoch
|
|
||||||
if epoch != "" {
|
|
||||||
convertToEpoch(r, epoch)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write out result immediately if chunked.
|
|
||||||
if chunked {
|
|
||||||
w.Write(MarshalJSON(Response{
|
|
||||||
Results: []*influxql.Result{r},
|
|
||||||
}, pretty))
|
|
||||||
w.(http.Flusher).Flush()
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// It's not chunked so buffer results in memory.
|
|
||||||
// Results for statements need to be combined together.
|
|
||||||
// We need to check if this new result is for the same statement as
|
|
||||||
// the last result, or for the next statement
|
|
||||||
l := len(resp.Results)
|
|
||||||
if l == 0 {
|
|
||||||
resp.Results = append(resp.Results, r)
|
|
||||||
} else if resp.Results[l-1].StatementID == r.StatementID {
|
|
||||||
cr := resp.Results[l-1]
|
|
||||||
lastSeries := cr.Series[len(cr.Series)-1]
|
|
||||||
rowsMerged := 0
|
|
||||||
|
|
||||||
for _, row := range r.Series {
|
|
||||||
if !lastSeries.SameSeries(row) {
|
|
||||||
// Next row is for a different series than last.
|
|
||||||
break
|
|
||||||
}
|
|
||||||
// Values are for the same series, so append them.
|
|
||||||
lastSeries.Values = append(lastSeries.Values, row.Values...)
|
|
||||||
rowsMerged++
|
|
||||||
}
|
|
||||||
|
|
||||||
// Append remaining rows as new rows.
|
|
||||||
r.Series = r.Series[rowsMerged:]
|
|
||||||
cr.Series = append(cr.Series, r.Series...)
|
|
||||||
} else {
|
|
||||||
resp.Results = append(resp.Results, r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If it's not chunked we buffered everything in memory, so write it out
|
|
||||||
if !chunked {
|
|
||||||
w.Write(MarshalJSON(resp, pretty))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *Handler) serveWrite(w http.ResponseWriter, r *http.Request, user *meta.UserInfo) {
|
|
||||||
|
|
||||||
// Handle gzip decoding of the body
|
|
||||||
body := r.Body
|
|
||||||
if r.Header.Get("Content-encoding") == "gzip" {
|
|
||||||
b, err := gzip.NewReader(r.Body)
|
|
||||||
if err != nil {
|
|
||||||
h.writeError(w, influxql.Result{Err: err}, http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
body = b
|
|
||||||
}
|
|
||||||
defer body.Close()
|
|
||||||
|
|
||||||
b, err := ioutil.ReadAll(body)
|
|
||||||
if err != nil {
|
|
||||||
if h.WriteTrace {
|
|
||||||
h.Logger.Print("write handler unable to read bytes from request body")
|
|
||||||
}
|
|
||||||
h.writeError(w, influxql.Result{Err: err}, http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if h.WriteTrace {
|
|
||||||
h.Logger.Printf("write body received by handler: %s", string(b))
|
|
||||||
}
|
|
||||||
|
|
||||||
if r.Header.Get("Content-Type") == "application/json" {
|
|
||||||
h.serveWriteJSON(w, r, b, user)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.serveWriteLine(w, r, b, user)
|
|
||||||
}
|
|
||||||
|
|
||||||
// serveWriteJSON receives incoming series data in JSON and writes it to the database.
|
|
||||||
func (h *Handler) serveWriteJSON(w http.ResponseWriter, r *http.Request, body []byte, user *meta.UserInfo) {
|
|
||||||
var bp client.BatchPoints
|
|
||||||
var dec *json.Decoder
|
|
||||||
|
|
||||||
dec = json.NewDecoder(bytes.NewReader(body))
|
|
||||||
|
|
||||||
if err := dec.Decode(&bp); err != nil {
|
|
||||||
if err.Error() == "EOF" {
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
resultError(w, influxql.Result{Err: err}, http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if bp.Database == "" {
|
|
||||||
resultError(w, influxql.Result{Err: fmt.Errorf("database is required")}, http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if di, err := h.MetaStore.Database(bp.Database); err != nil {
|
|
||||||
resultError(w, influxql.Result{Err: fmt.Errorf("metastore database error: %s", err)}, http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
} else if di == nil {
|
|
||||||
resultError(w, influxql.Result{Err: fmt.Errorf("database not found: %q", bp.Database)}, http.StatusNotFound)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if h.requireAuthentication && user == nil {
|
|
||||||
resultError(w, influxql.Result{Err: fmt.Errorf("user is required to write to database %q", bp.Database)}, http.StatusUnauthorized)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if h.requireAuthentication && !user.Authorize(influxql.WritePrivilege, bp.Database) {
|
|
||||||
resultError(w, influxql.Result{Err: fmt.Errorf("%q user is not authorized to write to database %q", user.Name, bp.Database)}, http.StatusUnauthorized)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
points, err := NormalizeBatchPoints(bp)
|
|
||||||
if err != nil {
|
|
||||||
resultError(w, influxql.Result{Err: err}, http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert the json batch struct to a points writer struct
|
|
||||||
if err := h.PointsWriter.WritePoints(&cluster.WritePointsRequest{
|
|
||||||
Database: bp.Database,
|
|
||||||
RetentionPolicy: bp.RetentionPolicy,
|
|
||||||
ConsistencyLevel: cluster.ConsistencyLevelOne,
|
|
||||||
Points: points,
|
|
||||||
}); influxdb.IsClientError(err) {
|
|
||||||
resultError(w, influxql.Result{Err: err}, http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
} else if err != nil {
|
|
||||||
resultError(w, influxql.Result{Err: err}, http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
w.WriteHeader(http.StatusNoContent)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *Handler) writeError(w http.ResponseWriter, result influxql.Result, statusCode int) {
|
|
||||||
w.WriteHeader(statusCode)
|
|
||||||
w.Write([]byte(result.Err.Error()))
|
|
||||||
w.Write([]byte("\n"))
|
|
||||||
}
|
|
||||||
|
|
||||||
// serveWriteLine receives incoming series data in line protocol format and writes it to the database.
|
|
||||||
func (h *Handler) serveWriteLine(w http.ResponseWriter, r *http.Request, body []byte, user *meta.UserInfo) {
|
|
||||||
// Some clients may not set the content-type header appropriately and send JSON with a non-json
|
|
||||||
// content-type. If the body looks JSON, try to handle it as as JSON instead
|
|
||||||
if len(body) > 0 {
|
|
||||||
var i int
|
|
||||||
for {
|
|
||||||
// JSON requests must start w/ an opening bracket
|
|
||||||
if body[i] == '{' {
|
|
||||||
h.serveWriteJSON(w, r, body, user)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// check that the byte is in the standard ascii code range
|
|
||||||
if body[i] > 32 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
i += 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
precision := r.FormValue("precision")
|
|
||||||
if precision == "" {
|
|
||||||
precision = "n"
|
|
||||||
}
|
|
||||||
|
|
||||||
points, err := tsdb.ParsePointsWithPrecision(body, time.Now().UTC(), precision)
|
|
||||||
if err != nil {
|
|
||||||
if err.Error() == "EOF" {
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.writeError(w, influxql.Result{Err: err}, http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
database := r.FormValue("db")
|
|
||||||
if database == "" {
|
|
||||||
h.writeError(w, influxql.Result{Err: fmt.Errorf("database is required")}, http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if di, err := h.MetaStore.Database(database); err != nil {
|
|
||||||
h.writeError(w, influxql.Result{Err: fmt.Errorf("metastore database error: %s", err)}, http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
} else if di == nil {
|
|
||||||
h.writeError(w, influxql.Result{Err: fmt.Errorf("database not found: %q", database)}, http.StatusNotFound)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if h.requireAuthentication && user == nil {
|
|
||||||
h.writeError(w, influxql.Result{Err: fmt.Errorf("user is required to write to database %q", database)}, http.StatusUnauthorized)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if h.requireAuthentication && !user.Authorize(influxql.WritePrivilege, database) {
|
|
||||||
h.writeError(w, influxql.Result{Err: fmt.Errorf("%q user is not authorized to write to database %q", user.Name, database)}, http.StatusUnauthorized)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Determine required consistency level.
|
|
||||||
consistency := cluster.ConsistencyLevelOne
|
|
||||||
switch r.Form.Get("consistency") {
|
|
||||||
case "all":
|
|
||||||
consistency = cluster.ConsistencyLevelAll
|
|
||||||
case "any":
|
|
||||||
consistency = cluster.ConsistencyLevelAny
|
|
||||||
case "one":
|
|
||||||
consistency = cluster.ConsistencyLevelOne
|
|
||||||
case "quorum":
|
|
||||||
consistency = cluster.ConsistencyLevelQuorum
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write points.
|
|
||||||
if err := h.PointsWriter.WritePoints(&cluster.WritePointsRequest{
|
|
||||||
Database: database,
|
|
||||||
RetentionPolicy: r.FormValue("rp"),
|
|
||||||
ConsistencyLevel: consistency,
|
|
||||||
Points: points,
|
|
||||||
}); influxdb.IsClientError(err) {
|
|
||||||
h.writeError(w, influxql.Result{Err: err}, http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
} else if err != nil {
|
|
||||||
h.writeError(w, influxql.Result{Err: err}, http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
w.WriteHeader(http.StatusNoContent)
|
|
||||||
}
|
|
||||||
|
|
||||||
// serveOptions returns an empty response to comply with OPTIONS pre-flight requests
|
|
||||||
func (h *Handler) serveOptions(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.WriteHeader(http.StatusNoContent)
|
|
||||||
}
|
|
||||||
|
|
||||||
// servePing returns a simple response to let the client know the server is running.
|
|
||||||
func (h *Handler) servePing(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.WriteHeader(http.StatusNoContent)
|
|
||||||
}
|
|
||||||
|
|
||||||
// convertToEpoch converts result timestamps from time.Time to the specified epoch.
|
|
||||||
func convertToEpoch(r *influxql.Result, epoch string) {
|
|
||||||
divisor := int64(1)
|
|
||||||
|
|
||||||
switch epoch {
|
|
||||||
case "u":
|
|
||||||
divisor = int64(time.Microsecond)
|
|
||||||
case "ms":
|
|
||||||
divisor = int64(time.Millisecond)
|
|
||||||
case "s":
|
|
||||||
divisor = int64(time.Second)
|
|
||||||
case "m":
|
|
||||||
divisor = int64(time.Minute)
|
|
||||||
case "h":
|
|
||||||
divisor = int64(time.Hour)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, s := range r.Series {
|
|
||||||
for _, v := range s.Values {
|
|
||||||
if ts, ok := v[0].(time.Time); ok {
|
|
||||||
v[0] = ts.UnixNano() / divisor
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalJSON will marshal v to JSON. Pretty prints if pretty is true.
|
|
||||||
func MarshalJSON(v interface{}, pretty bool) []byte {
|
|
||||||
var b []byte
|
|
||||||
var err error
|
|
||||||
if pretty {
|
|
||||||
b, err = json.MarshalIndent(v, "", " ")
|
|
||||||
} else {
|
|
||||||
b, err = json.Marshal(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return []byte(err.Error())
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
type Point struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
Time time.Time `json:"time"`
|
|
||||||
Tags map[string]string `json:"tags"`
|
|
||||||
Fields map[string]interface{} `json:"fields"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Batch struct {
|
|
||||||
Database string `json:"database"`
|
|
||||||
RetentionPolicy string `json:"retentionPolicy"`
|
|
||||||
Points []Point `json:"points"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// httpError writes an error to the client in a standard format.
|
|
||||||
func httpError(w http.ResponseWriter, error string, pretty bool, code int) {
|
|
||||||
w.Header().Add("content-type", "application/json")
|
|
||||||
w.WriteHeader(code)
|
|
||||||
|
|
||||||
response := Response{Err: errors.New(error)}
|
|
||||||
var b []byte
|
|
||||||
if pretty {
|
|
||||||
b, _ = json.MarshalIndent(response, "", " ")
|
|
||||||
} else {
|
|
||||||
b, _ = json.Marshal(response)
|
|
||||||
}
|
|
||||||
w.Write(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func resultError(w http.ResponseWriter, result influxql.Result, code int) {
|
|
||||||
w.Header().Add("content-type", "application/json")
|
|
||||||
w.WriteHeader(code)
|
|
||||||
_ = json.NewEncoder(w).Encode(&result)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Filters and filter helpers
|
|
||||||
|
|
||||||
// parseCredentials returns the username and password encoded in
|
|
||||||
// a request. The credentials may be present as URL query params, or as
|
|
||||||
// a Basic Authentication header.
|
|
||||||
// as params: http://127.0.0.1/query?u=username&p=password
|
|
||||||
// as basic auth: http://username:password@127.0.0.1
|
|
||||||
func parseCredentials(r *http.Request) (string, string, error) {
|
|
||||||
q := r.URL.Query()
|
|
||||||
|
|
||||||
if u, p := q.Get("u"), q.Get("p"); u != "" && p != "" {
|
|
||||||
return u, p, nil
|
|
||||||
}
|
|
||||||
if u, p, ok := r.BasicAuth(); ok {
|
|
||||||
return u, p, nil
|
|
||||||
}
|
|
||||||
return "", "", fmt.Errorf("unable to parse Basic Auth credentials")
|
|
||||||
}
|
|
||||||
|
|
||||||
// authenticate wraps a handler and ensures that if user credentials are passed in
|
|
||||||
// an attempt is made to authenticate that user. If authentication fails, an error is returned.
|
|
||||||
//
|
|
||||||
// There is one exception: if there are no users in the system, authentication is not required. This
|
|
||||||
// is to facilitate bootstrapping of a system with authentication enabled.
|
|
||||||
func authenticate(inner func(http.ResponseWriter, *http.Request, *meta.UserInfo), h *Handler, requireAuthentication bool) http.Handler {
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
// Return early if we are not authenticating
|
|
||||||
if !requireAuthentication {
|
|
||||||
inner(w, r, nil)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
var user *meta.UserInfo
|
|
||||||
|
|
||||||
// Retrieve user list.
|
|
||||||
uis, err := h.MetaStore.Users()
|
|
||||||
if err != nil {
|
|
||||||
httpError(w, err.Error(), false, http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO corylanou: never allow this in the future without users
|
|
||||||
if requireAuthentication && len(uis) > 0 {
|
|
||||||
username, password, err := parseCredentials(r)
|
|
||||||
if err != nil {
|
|
||||||
httpError(w, err.Error(), false, http.StatusUnauthorized)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if username == "" {
|
|
||||||
httpError(w, "username required", false, http.StatusUnauthorized)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
user, err = h.MetaStore.Authenticate(username, password)
|
|
||||||
if err != nil {
|
|
||||||
httpError(w, err.Error(), false, http.StatusUnauthorized)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
inner(w, r, user)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
type gzipResponseWriter struct {
|
|
||||||
io.Writer
|
|
||||||
http.ResponseWriter
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w gzipResponseWriter) Write(b []byte) (int, error) {
|
|
||||||
return w.Writer.Write(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w gzipResponseWriter) Flush() {
|
|
||||||
w.Writer.(*gzip.Writer).Flush()
|
|
||||||
}
|
|
||||||
|
|
||||||
// determines if the client can accept compressed responses, and encodes accordingly
|
|
||||||
func gzipFilter(inner http.Handler) http.Handler {
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
|
||||||
inner.ServeHTTP(w, r)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
w.Header().Set("Content-Encoding", "gzip")
|
|
||||||
gz := gzip.NewWriter(w)
|
|
||||||
defer gz.Close()
|
|
||||||
gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w}
|
|
||||||
inner.ServeHTTP(gzw, r)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// versionHeader takes a HTTP handler and returns a HTTP handler
|
|
||||||
// and adds the X-INFLUXBD-VERSION header to outgoing responses.
|
|
||||||
func versionHeader(inner http.Handler, h *Handler) http.Handler {
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.Header().Add("X-InfluxDB-Version", h.Version)
|
|
||||||
inner.ServeHTTP(w, r)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// cors responds to incoming requests and adds the appropriate cors headers
|
|
||||||
// TODO: corylanou: add the ability to configure this in our config
|
|
||||||
func cors(inner http.Handler) http.Handler {
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
if origin := r.Header.Get("Origin"); origin != "" {
|
|
||||||
w.Header().Set(`Access-Control-Allow-Origin`, origin)
|
|
||||||
w.Header().Set(`Access-Control-Allow-Methods`, strings.Join([]string{
|
|
||||||
`DELETE`,
|
|
||||||
`GET`,
|
|
||||||
`OPTIONS`,
|
|
||||||
`POST`,
|
|
||||||
`PUT`,
|
|
||||||
}, ", "))
|
|
||||||
|
|
||||||
w.Header().Set(`Access-Control-Allow-Headers`, strings.Join([]string{
|
|
||||||
`Accept`,
|
|
||||||
`Accept-Encoding`,
|
|
||||||
`Authorization`,
|
|
||||||
`Content-Length`,
|
|
||||||
`Content-Type`,
|
|
||||||
`X-CSRF-Token`,
|
|
||||||
`X-HTTP-Method-Override`,
|
|
||||||
}, ", "))
|
|
||||||
}
|
|
||||||
|
|
||||||
if r.Method == "OPTIONS" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
inner.ServeHTTP(w, r)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func requestID(inner http.Handler) http.Handler {
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
uid := uuid.TimeUUID()
|
|
||||||
r.Header.Set("Request-Id", uid.String())
|
|
||||||
w.Header().Set("Request-Id", r.Header.Get("Request-Id"))
|
|
||||||
|
|
||||||
inner.ServeHTTP(w, r)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func logging(inner http.Handler, name string, weblog *log.Logger) http.Handler {
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
start := time.Now()
|
|
||||||
l := &responseLogger{w: w}
|
|
||||||
inner.ServeHTTP(l, r)
|
|
||||||
logLine := buildLogLine(l, r, start)
|
|
||||||
weblog.Println(logLine)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func recovery(inner http.Handler, name string, weblog *log.Logger) http.Handler {
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
start := time.Now()
|
|
||||||
l := &responseLogger{w: w}
|
|
||||||
inner.ServeHTTP(l, r)
|
|
||||||
if err := recover(); err != nil {
|
|
||||||
logLine := buildLogLine(l, r, start)
|
|
||||||
logLine = fmt.Sprintf(`%s [err:%s]`, logLine, err)
|
|
||||||
weblog.Println(logLine)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Response represents a list of statement results.
|
|
||||||
type Response struct {
|
|
||||||
Results []*influxql.Result
|
|
||||||
Err error
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalJSON encodes a Response struct into JSON.
|
|
||||||
func (r Response) MarshalJSON() ([]byte, error) {
|
|
||||||
// Define a struct that outputs "error" as a string.
|
|
||||||
var o struct {
|
|
||||||
Results []*influxql.Result `json:"results,omitempty"`
|
|
||||||
Err string `json:"error,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Copy fields to output struct.
|
|
||||||
o.Results = r.Results
|
|
||||||
if r.Err != nil {
|
|
||||||
o.Err = r.Err.Error()
|
|
||||||
}
|
|
||||||
|
|
||||||
return json.Marshal(&o)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSON decodes the data into the Response struct
|
|
||||||
func (r *Response) UnmarshalJSON(b []byte) error {
|
|
||||||
var o struct {
|
|
||||||
Results []*influxql.Result `json:"results,omitempty"`
|
|
||||||
Err string `json:"error,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
err := json.Unmarshal(b, &o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
r.Results = o.Results
|
|
||||||
if o.Err != "" {
|
|
||||||
r.Err = errors.New(o.Err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error returns the first error from any statement.
|
|
||||||
// Returns nil if no errors occurred on any statements.
|
|
||||||
func (r *Response) Error() error {
|
|
||||||
if r.Err != nil {
|
|
||||||
return r.Err
|
|
||||||
}
|
|
||||||
for _, rr := range r.Results {
|
|
||||||
if rr.Err != nil {
|
|
||||||
return rr.Err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NormalizeBatchPoints returns a slice of Points, created by populating individual
// points within the batch, which do not have times or tags, with the top-level
// values.
func NormalizeBatchPoints(bp client.BatchPoints) ([]tsdb.Point, error) {
	points := []tsdb.Point{}
	for _, p := range bp.Points {
		// p is a copy of the slice element; the mutations below do not
		// touch the caller's BatchPoints.
		if p.Time.IsZero() {
			if bp.Time.IsZero() {
				// Neither the point nor the batch carries a time; stamp now.
				p.Time = time.Now()
			} else {
				p.Time = bp.Time
			}
		}
		// Batch-level precision is a fallback only; a point-level value wins.
		if p.Precision == "" && bp.Precision != "" {
			p.Precision = bp.Precision
		}
		p.Time = client.SetPrecision(p.Time, p.Precision)
		if len(bp.Tags) > 0 {
			if p.Tags == nil {
				p.Tags = make(map[string]string)
			}
			// Batch tags fill gaps only; existing point tags are preserved.
			for k := range bp.Tags {
				if p.Tags[k] == "" {
					p.Tags[k] = bp.Tags[k]
				}
			}
		}

		if p.Measurement == "" {
			// Note: returns the points normalized so far alongside the error.
			return points, fmt.Errorf("missing measurement")
		}

		if len(p.Fields) == 0 {
			return points, fmt.Errorf("missing fields")
		}
		// Need to convert from a client.Point to a influxdb.Point
		points = append(points, tsdb.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time))
	}

	return points, nil
}
|
|
448
Godeps/_workspace/src/github.com/influxdb/influxdb/services/httpd/handler_test.go
generated
vendored
448
Godeps/_workspace/src/github.com/influxdb/influxdb/services/httpd/handler_test.go
generated
vendored
|
@ -1,448 +0,0 @@
|
||||||
package httpd_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"reflect"
|
|
||||||
"regexp"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/influxdb/influxdb/client"
|
|
||||||
"github.com/influxdb/influxdb/influxql"
|
|
||||||
"github.com/influxdb/influxdb/meta"
|
|
||||||
"github.com/influxdb/influxdb/services/httpd"
|
|
||||||
"github.com/influxdb/influxdb/tsdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestBatchWrite_UnmarshalEpoch verifies that client.BatchPoints decodes a
// numeric "time" field at every supported epoch precision, including extreme
// values (max int64, far-future timestamps).
func TestBatchWrite_UnmarshalEpoch(t *testing.T) {
	now := time.Now().UTC()
	tests := []struct {
		name      string
		epoch     int64
		precision string
		expected  time.Time
	}{
		{
			name:      "nanoseconds",
			epoch:     now.UnixNano(),
			precision: "n",
			expected:  now,
		},
		{
			name:      "microseconds",
			epoch:     now.Round(time.Microsecond).UnixNano() / int64(time.Microsecond),
			precision: "u",
			expected:  now.Round(time.Microsecond),
		},
		{
			name:      "milliseconds",
			epoch:     now.Round(time.Millisecond).UnixNano() / int64(time.Millisecond),
			precision: "ms",
			expected:  now.Round(time.Millisecond),
		},
		{
			name:      "seconds",
			epoch:     now.Round(time.Second).UnixNano() / int64(time.Second),
			precision: "s",
			expected:  now.Round(time.Second),
		},
		{
			name:      "minutes",
			epoch:     now.Round(time.Minute).UnixNano() / int64(time.Minute),
			precision: "m",
			expected:  now.Round(time.Minute),
		},
		{
			name:      "hours",
			epoch:     now.Round(time.Hour).UnixNano() / int64(time.Hour),
			precision: "h",
			expected:  now.Round(time.Hour),
		},
		{
			name:      "max int64",
			epoch:     9223372036854775807,
			precision: "n",
			expected:  time.Unix(0, 9223372036854775807),
		},
		{
			name:      "100 years from now",
			epoch:     now.Add(time.Hour * 24 * 365 * 100).UnixNano(),
			precision: "n",
			expected:  now.Add(time.Hour * 24 * 365 * 100),
		},
	}

	for _, test := range tests {
		t.Logf("testing %q\n", test.name)
		data := []byte(fmt.Sprintf(`{"time": %d, "precision":"%s"}`, test.epoch, test.precision))
		t.Logf("json: %s", string(data))
		var bp client.BatchPoints
		err := json.Unmarshal(data, &bp)
		if err != nil {
			t.Fatalf("unexpected error. expected: %v, actual: %v", nil, err)
		}
		if !bp.Time.Equal(test.expected) {
			t.Fatalf("Unexpected time. expected: %v, actual: %v", test.expected, bp.Time)
		}
	}
}
|
|
||||||
|
|
||||||
// TestBatchWrite_UnmarshalRFC verifies that client.BatchPoints decodes a
// string "time" field in RFC3339 and RFC3339Nano formats.
func TestBatchWrite_UnmarshalRFC(t *testing.T) {
	now := time.Now()
	tests := []struct {
		name     string
		rfc      string
		now      time.Time
		expected time.Time
	}{
		{
			name:     "RFC3339Nano",
			rfc:      time.RFC3339Nano,
			now:      now,
			expected: now,
		},
		{
			name:     "RFC3339",
			rfc:      time.RFC3339,
			now:      now.Round(time.Second),
			expected: now.Round(time.Second),
		},
	}

	for _, test := range tests {
		t.Logf("testing %q\n", test.name)
		ts := test.now.Format(test.rfc)
		data := []byte(fmt.Sprintf(`{"time": %q}`, ts))
		t.Logf("json: %s", string(data))
		var bp client.BatchPoints
		err := json.Unmarshal(data, &bp)
		if err != nil {
			t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err)
		}
		if !bp.Time.Equal(test.expected) {
			t.Fatalf("Unexpected time. expected: %v, actual: %v", test.expected, bp.Time)
		}
	}
}
|
|
||||||
|
|
||||||
// Ensure the handler returns results from a query (including nil results).
func TestHandler_Query(t *testing.T) {
	h := NewHandler(false)
	// Stub the executor: assert the parsed query/db, then stream two results
	// plus a trailing nil (which the handler must skip, not serialize).
	h.QueryExecutor.ExecuteQueryFn = func(q *influxql.Query, db string, chunkSize int) (<-chan *influxql.Result, error) {
		if q.String() != `SELECT * FROM bar` {
			t.Fatalf("unexpected query: %s", q.String())
		} else if db != `foo` {
			t.Fatalf("unexpected db: %s", db)
		}
		return NewResultChan(
			&influxql.Result{StatementID: 1, Series: influxql.Rows{{Name: "series0"}}},
			&influxql.Result{StatementID: 2, Series: influxql.Rows{{Name: "series1"}}},
			nil,
		), nil
	}

	w := httptest.NewRecorder()
	h.ServeHTTP(w, MustNewJSONRequest("GET", "/query?db=foo&q=SELECT+*+FROM+bar", nil))
	if w.Code != http.StatusOK {
		t.Fatalf("unexpected status: %d", w.Code)
	} else if w.Body.String() != `{"results":[{"series":[{"name":"series0"}]},{"series":[{"name":"series1"}]}]}` {
		t.Fatalf("unexpected body: %s", w.Body.String())
	}
}
|
|
||||||
|
|
||||||
// Ensure the handler merges results from the same statement.
// Two results sharing StatementID 1 must be combined into one results entry.
func TestHandler_Query_MergeResults(t *testing.T) {
	h := NewHandler(false)
	h.QueryExecutor.ExecuteQueryFn = func(q *influxql.Query, db string, chunkSize int) (<-chan *influxql.Result, error) {
		return NewResultChan(
			&influxql.Result{StatementID: 1, Series: influxql.Rows{{Name: "series0"}}},
			&influxql.Result{StatementID: 1, Series: influxql.Rows{{Name: "series1"}}},
		), nil
	}

	w := httptest.NewRecorder()
	h.ServeHTTP(w, MustNewJSONRequest("GET", "/query?db=foo&q=SELECT+*+FROM+bar", nil))
	if w.Code != http.StatusOK {
		t.Fatalf("unexpected status: %d", w.Code)
	} else if w.Body.String() != `{"results":[{"series":[{"name":"series0"},{"name":"series1"}]}]}` {
		t.Fatalf("unexpected body: %s", w.Body.String())
	}
}
|
|
||||||
|
|
||||||
// Ensure the handler can parse chunked and chunk size query parameters.
// In chunked mode each result is emitted as its own JSON document.
func TestHandler_Query_Chunked(t *testing.T) {
	h := NewHandler(false)
	h.QueryExecutor.ExecuteQueryFn = func(q *influxql.Query, db string, chunkSize int) (<-chan *influxql.Result, error) {
		if chunkSize != 2 {
			t.Fatalf("unexpected chunk size: %d", chunkSize)
		}
		return NewResultChan(
			&influxql.Result{StatementID: 1, Series: influxql.Rows{{Name: "series0"}}},
			&influxql.Result{StatementID: 1, Series: influxql.Rows{{Name: "series1"}}},
		), nil
	}

	w := httptest.NewRecorder()
	h.ServeHTTP(w, MustNewJSONRequest("GET", "/query?db=foo&q=SELECT+*+FROM+bar&chunked=true&chunk_size=2", nil))
	if w.Code != http.StatusOK {
		t.Fatalf("unexpected status: %d", w.Code)
	} else if w.Body.String() != `{"results":[{"series":[{"name":"series0"}]}]}{"results":[{"series":[{"name":"series1"}]}]}` {
		t.Fatalf("unexpected body: %s", w.Body.String())
	}
}
|
|
||||||
|
|
||||||
// Ensure the handler returns a status 400 if the query is not passed in.
func TestHandler_Query_ErrQueryRequired(t *testing.T) {
	h := NewHandler(false)
	w := httptest.NewRecorder()
	h.ServeHTTP(w, MustNewJSONRequest("GET", "/query", nil))
	if w.Code != http.StatusBadRequest {
		t.Fatalf("unexpected status: %d", w.Code)
	} else if w.Body.String() != `{"error":"missing required parameter \"q\""}` {
		t.Fatalf("unexpected body: %s", w.Body.String())
	}
}
|
|
||||||
|
|
||||||
// Ensure the handler returns a status 400 if the query cannot be parsed.
// The expected body pins influxql's parse-error message verbatim.
func TestHandler_Query_ErrInvalidQuery(t *testing.T) {
	h := NewHandler(false)
	w := httptest.NewRecorder()
	h.ServeHTTP(w, MustNewJSONRequest("GET", "/query?q=SELECT", nil))
	if w.Code != http.StatusBadRequest {
		t.Fatalf("unexpected status: %d", w.Code)
	} else if w.Body.String() != `{"error":"error parsing query: found EOF, expected identifier, string, number, bool at line 1, char 8"}` {
		t.Fatalf("unexpected body: %s", w.Body.String())
	}
}
|
|
||||||
|
|
||||||
// Ensure the handler returns a status 401 if the user is not authorized.
|
|
||||||
// func TestHandler_Query_ErrUnauthorized(t *testing.T) {
|
|
||||||
// h := NewHandler(false)
|
|
||||||
// h.QueryExecutor.AuthorizeFn = func(u *meta.UserInfo, q *influxql.Query, db string) error {
|
|
||||||
// return errors.New("marker")
|
|
||||||
// }
|
|
||||||
|
|
||||||
// w := httptest.NewRecorder()
|
|
||||||
// h.ServeHTTP(w, MustNewJSONRequest("GET", "/query?u=bar&db=foo&q=SHOW+SERIES+FROM+bar", nil))
|
|
||||||
// if w.Code != http.StatusUnauthorized {
|
|
||||||
// t.Fatalf("unexpected status: %d", w.Code)
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
|
|
||||||
// Ensure the handler returns a status 500 if an error is returned from the query executor.
func TestHandler_Query_ErrExecuteQuery(t *testing.T) {
	h := NewHandler(false)
	h.QueryExecutor.ExecuteQueryFn = func(q *influxql.Query, db string, chunkSize int) (<-chan *influxql.Result, error) {
		return nil, errors.New("marker")
	}

	w := httptest.NewRecorder()
	h.ServeHTTP(w, MustNewJSONRequest("GET", "/query?db=foo&q=SHOW+SERIES+FROM+bar", nil))
	if w.Code != http.StatusInternalServerError {
		t.Fatalf("unexpected status: %d", w.Code)
	}
}
|
|
||||||
|
|
||||||
// Ensure the handler returns a status 200 if an error is returned in the result.
// Per-statement errors are reported in the body, not via the HTTP status.
func TestHandler_Query_ErrResult(t *testing.T) {
	h := NewHandler(false)
	h.QueryExecutor.ExecuteQueryFn = func(q *influxql.Query, db string, chunkSize int) (<-chan *influxql.Result, error) {
		return NewResultChan(&influxql.Result{Err: errors.New("measurement not found")}), nil
	}

	w := httptest.NewRecorder()
	h.ServeHTTP(w, MustNewJSONRequest("GET", "/query?db=foo&q=SHOW+SERIES+from+bin", nil))
	if w.Code != http.StatusOK {
		t.Fatalf("unexpected status: %d", w.Code)
	} else if w.Body.String() != `{"results":[{"error":"measurement not found"}]}` {
		t.Fatalf("unexpected body: %s", w.Body.String())
	}
}
|
|
||||||
|
|
||||||
// TestMarshalJSON_NoPretty verifies compact (non-indented) output.
func TestMarshalJSON_NoPretty(t *testing.T) {
	if b := httpd.MarshalJSON(struct {
		Name string `json:"name"`
	}{Name: "foo"}, false); string(b) != `{"name":"foo"}` {
		t.Fatalf("unexpected bytes: %s", b)
	}
}
|
|
||||||
|
|
||||||
// TestMarshalJSON_Pretty verifies indented output when pretty is requested.
func TestMarshalJSON_Pretty(t *testing.T) {
	if b := httpd.MarshalJSON(struct {
		Name string `json:"name"`
	}{Name: "foo"}, true); string(b) != "{\n    \"name\": \"foo\"\n}" {
		t.Fatalf("unexpected bytes: %q", string(b))
	}
}
|
|
||||||
|
|
||||||
// TestMarshalJSON_Error verifies that marshaling failures are rendered as the
// error text itself rather than a JSON document.
func TestMarshalJSON_Error(t *testing.T) {
	if b := httpd.MarshalJSON(&invalidJSON{}, true); string(b) != "json: error calling MarshalJSON for type *httpd_test.invalidJSON: marker" {
		t.Fatalf("unexpected bytes: %q", string(b))
	}
}
|
|
||||||
|
|
||||||
// invalidJSON is a json.Marshaler whose MarshalJSON always fails; it exists
// to exercise httpd.MarshalJSON's error path.
type invalidJSON struct{}

func (*invalidJSON) MarshalJSON() ([]byte, error) { return nil, errors.New("marker") }
|
|
||||||
|
|
||||||
// TestNormalizeBatchPoints verifies that batch-level time and tags are merged
// into points that lack them, while point-level values are preserved.
func TestNormalizeBatchPoints(t *testing.T) {
	now := time.Now()
	tests := []struct {
		name string
		bp   client.BatchPoints
		p    []tsdb.Point
		err  string
	}{
		{
			name: "default",
			bp: client.BatchPoints{
				Points: []client.Point{
					{Measurement: "cpu", Tags: map[string]string{"region": "useast"}, Time: now, Fields: map[string]interface{}{"value": 1.0}},
				},
			},
			p: []tsdb.Point{
				tsdb.NewPoint("cpu", map[string]string{"region": "useast"}, map[string]interface{}{"value": 1.0}, now),
			},
		},
		{
			name: "merge time",
			bp: client.BatchPoints{
				Time: now,
				Points: []client.Point{
					{Measurement: "cpu", Tags: map[string]string{"region": "useast"}, Fields: map[string]interface{}{"value": 1.0}},
				},
			},
			p: []tsdb.Point{
				tsdb.NewPoint("cpu", map[string]string{"region": "useast"}, map[string]interface{}{"value": 1.0}, now),
			},
		},
		{
			name: "merge tags",
			bp: client.BatchPoints{
				Tags: map[string]string{"day": "monday"},
				Points: []client.Point{
					{Measurement: "cpu", Tags: map[string]string{"region": "useast"}, Time: now, Fields: map[string]interface{}{"value": 1.0}},
					{Measurement: "memory", Time: now, Fields: map[string]interface{}{"value": 2.0}},
				},
			},
			p: []tsdb.Point{
				tsdb.NewPoint("cpu", map[string]string{"day": "monday", "region": "useast"}, map[string]interface{}{"value": 1.0}, now),
				tsdb.NewPoint("memory", map[string]string{"day": "monday"}, map[string]interface{}{"value": 2.0}, now),
			},
		},
	}

	for _, test := range tests {
		t.Logf("running test %q", test.name)
		p, e := httpd.NormalizeBatchPoints(test.bp)
		if test.err == "" && e != nil {
			t.Errorf("unexpected error %v", e)
		} else if test.err != "" && e == nil {
			t.Errorf("expected error %s, got <nil>", test.err)
		} else if e != nil && test.err != e.Error() {
			t.Errorf("unexpected error. expected: %s, got %v", test.err, e)
		}
		if !reflect.DeepEqual(p, test.p) {
			t.Logf("expected: %+v", test.p)
			t.Logf("got: %+v", p)
			t.Error("failed to normalize.")
		}
	}
}
|
|
||||||
|
|
||||||
// Handler represents a test wrapper for httpd.Handler that substitutes mock
// implementations for the handler's external dependencies.
// (The original comment mis-named this type "NewHandler".)
type Handler struct {
	*httpd.Handler
	MetaStore     HandlerMetaStore     // mock meta store
	QueryExecutor HandlerQueryExecutor // mock query executor
	TSDBStore     HandlerTSDBStore     // mock tsdb store
}
|
|
||||||
|
|
||||||
// NewHandler returns a new instance of Handler with its MetaStore and
// QueryExecutor wired to the embedded mocks.
func NewHandler(requireAuthentication bool) *Handler {
	h := &Handler{
		Handler: httpd.NewHandler(requireAuthentication, true, false),
	}
	h.Handler.MetaStore = &h.MetaStore
	h.Handler.QueryExecutor = &h.QueryExecutor
	// NOTE(review): TSDBStore is declared on Handler but never assigned into
	// h.Handler here — confirm whether that is intentional.
	h.Handler.Version = "0.0.0"
	return h
}
|
|
||||||
|
|
||||||
// HandlerMetaStore is a mock implementation of Handler.MetaStore.
// Each field holds the function invoked by the corresponding method; a test
// assigns only the functions it expects to be called.
type HandlerMetaStore struct {
	DatabaseFn     func(name string) (*meta.DatabaseInfo, error)
	AuthenticateFn func(username, password string) (ui *meta.UserInfo, err error)
	UsersFn        func() ([]meta.UserInfo, error)
}
|
|
||||||
|
|
||||||
// Database delegates to the configurable DatabaseFn (panics if unset).
func (s *HandlerMetaStore) Database(name string) (*meta.DatabaseInfo, error) {
	return s.DatabaseFn(name)
}
|
|
||||||
|
|
||||||
// Authenticate delegates to the configurable AuthenticateFn (panics if unset).
func (s *HandlerMetaStore) Authenticate(username, password string) (ui *meta.UserInfo, err error) {
	return s.AuthenticateFn(username, password)
}
|
|
||||||
|
|
||||||
// Users delegates to the configurable UsersFn (panics if unset).
func (s *HandlerMetaStore) Users() ([]meta.UserInfo, error) {
	return s.UsersFn()
}
|
|
||||||
|
|
||||||
// HandlerQueryExecutor is a mock implementation of Handler.QueryExecutor.
// Tests assign the function fields they expect the handler to invoke.
type HandlerQueryExecutor struct {
	AuthorizeFn    func(u *meta.UserInfo, q *influxql.Query, db string) error
	ExecuteQueryFn func(q *influxql.Query, db string, chunkSize int) (<-chan *influxql.Result, error)
}
|
|
||||||
|
|
||||||
// Authorize delegates to the configurable AuthorizeFn (panics if unset).
func (e *HandlerQueryExecutor) Authorize(u *meta.UserInfo, q *influxql.Query, db string) error {
	return e.AuthorizeFn(u, q, db)
}
|
|
||||||
|
|
||||||
// ExecuteQuery delegates to the configurable ExecuteQueryFn (panics if unset).
func (e *HandlerQueryExecutor) ExecuteQuery(q *influxql.Query, db string, chunkSize int) (<-chan *influxql.Result, error) {
	return e.ExecuteQueryFn(q, db, chunkSize)
}
|
|
||||||
|
|
||||||
// HandlerTSDBStore is a mock implementation of Handler.TSDBStore
type HandlerTSDBStore struct {
	CreateMapperFn func(shardID uint64, query string, chunkSize int) (tsdb.Mapper, error)
}
|
|
||||||
|
|
||||||
// CreateMapper delegates to the configurable CreateMapperFn (panics if unset).
func (h *HandlerTSDBStore) CreateMapper(shardID uint64, query string, chunkSize int) (tsdb.Mapper, error) {
	return h.CreateMapperFn(shardID, query, chunkSize)
}
|
|
||||||
|
|
||||||
// MustNewRequest returns a new HTTP request. Panic on error.
func MustNewRequest(method, urlStr string, body io.Reader) *http.Request {
	req, err := http.NewRequest(method, urlStr, body)
	if err == nil {
		return req
	}
	panic(err.Error())
}
|
|
||||||
|
|
||||||
// MustNewRequest returns a new HTTP request with the content type set. Panic on error.
|
|
||||||
func MustNewJSONRequest(method, urlStr string, body io.Reader) *http.Request {
|
|
||||||
r := MustNewRequest(method, urlStr, body)
|
|
||||||
r.Header.Set("Content-Type", "application/json")
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
// matchRegex reports whether s matches pattern. The pattern is compiled on
// every call, so this helper is intended for test code only.
func matchRegex(pattern, s string) bool {
	re := regexp.MustCompile(pattern)
	return re.MatchString(s)
}
|
|
||||||
|
|
||||||
// NewResultChan returns a channel that sends all results and then closes.
|
|
||||||
func NewResultChan(results ...*influxql.Result) <-chan *influxql.Result {
|
|
||||||
ch := make(chan *influxql.Result, len(results))
|
|
||||||
for _, r := range results {
|
|
||||||
ch <- r
|
|
||||||
}
|
|
||||||
close(ch)
|
|
||||||
return ch
|
|
||||||
}
|
|
153
Godeps/_workspace/src/github.com/influxdb/influxdb/services/httpd/response_logger.go
generated
vendored
153
Godeps/_workspace/src/github.com/influxdb/influxdb/services/httpd/response_logger.go
generated
vendored
|
@ -1,153 +0,0 @@
|
||||||
package httpd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// loggingResponseWriter is an http.ResponseWriter that additionally exposes
// the status code and body size it has written, for access logging.
type loggingResponseWriter interface {
	http.ResponseWriter
	Status() int
	Size() int
}
|
|
||||||
|
|
||||||
// responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP status
// code and body size
type responseLogger struct {
	w      http.ResponseWriter // underlying writer all calls delegate to
	status int                 // last status code written; 0 until one is recorded
	size   int                 // cumulative body bytes written
}
|
|
||||||
|
|
||||||
// Header delegates to the wrapped ResponseWriter's header map.
func (l *responseLogger) Header() http.Header {
	return l.w.Header()
}
|
|
||||||
|
|
||||||
// Flush forwards the flush to the wrapped writer.
// NOTE(review): the type assertion panics if the underlying writer is not an
// http.Flusher — confirm callers only wrap flushable writers.
func (l *responseLogger) Flush() {
	l.w.(http.Flusher).Flush()
}
|
|
||||||
|
|
||||||
func (l *responseLogger) Write(b []byte) (int, error) {
|
|
||||||
if l.status == 0 {
|
|
||||||
// Set status if WriteHeader has not been called
|
|
||||||
l.status = http.StatusOK
|
|
||||||
}
|
|
||||||
size, err := l.w.Write(b)
|
|
||||||
l.size += size
|
|
||||||
return size, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteHeader writes the status code to the wrapped writer and records it
// for later reporting via Status.
func (l *responseLogger) WriteHeader(s int) {
	l.w.WriteHeader(s)
	l.status = s
}
|
|
||||||
|
|
||||||
func (l *responseLogger) Status() int {
|
|
||||||
if l.status == 0 {
|
|
||||||
// This can happen if we never actually write data, but only set response headers.
|
|
||||||
l.status = http.StatusOK
|
|
||||||
}
|
|
||||||
return l.status
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size returns the number of body bytes written so far.
func (l *responseLogger) Size() int {
	return l.size
}
|
|
||||||
|
|
||||||
// redact any occurrence of a password parameter, 'p'
|
|
||||||
func redactPassword(r *http.Request) {
|
|
||||||
q := r.URL.Query()
|
|
||||||
if p := q.Get("p"); p != "" {
|
|
||||||
q.Set("p", "[REDACTED]")
|
|
||||||
r.URL.RawQuery = q.Encode()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Common Log Format: http://en.wikipedia.org/wiki/Common_Log_Format

// buildLogLine creates a common log format
// in addition to the common fields, we also append referrer, user agent and request ID
func buildLogLine(l *responseLogger, r *http.Request, start time.Time) string {

	// NOTE: mutates r — the 'p' query parameter is redacted in place.
	redactPassword(r)

	username := parseUsername(r)

	host, _, err := net.SplitHostPort(r.RemoteAddr)

	if err != nil {
		// RemoteAddr carried no port component; use it verbatim.
		host = r.RemoteAddr
	}

	uri := r.URL.RequestURI()

	referer := r.Referer()

	userAgent := r.UserAgent()

	// Common Log Format fields, followed by referer, user agent, request ID,
	// and the elapsed request duration. Blank values are rendered as "-".
	fields := []string{
		host,
		"-",
		detect(username, "-"),
		fmt.Sprintf("[%s]", start.Format("02/Jan/2006:15:04:05 -0700")),
		r.Method,
		uri,
		r.Proto,
		detect(strconv.Itoa(l.Status()), "-"),
		strconv.Itoa(l.Size()),
		detect(referer, "-"),
		detect(userAgent, "-"),
		r.Header.Get("Request-Id"),
		fmt.Sprintf("%s", time.Since(start)),
	}

	return strings.Join(fields, " ")
}
|
|
||||||
|
|
||||||
// detect returns the first non-blank string among values, or "" when every
// value (or no value) is blank.
func detect(values ...string) string {
	for _, candidate := range values {
		if len(candidate) > 0 {
			return candidate
		}
	}
	return ""
}
|
|
||||||
|
|
||||||
// parses the username either from the url or auth header
|
|
||||||
func parseUsername(r *http.Request) string {
|
|
||||||
var (
|
|
||||||
username = ""
|
|
||||||
url = r.URL
|
|
||||||
)
|
|
||||||
|
|
||||||
// get username from the url if passed there
|
|
||||||
if url.User != nil {
|
|
||||||
if name := url.User.Username(); name != "" {
|
|
||||||
username = name
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to get the username from the query param 'u'
|
|
||||||
q := url.Query()
|
|
||||||
if u := q.Get("u"); u != "" {
|
|
||||||
username = u
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to get it from the authorization header if set there
|
|
||||||
if username == "" {
|
|
||||||
if u, _, ok := r.BasicAuth(); ok {
|
|
||||||
username = u
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return username
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sanitize passwords from query string for logging.
|
|
||||||
func sanitize(r *http.Request, s string) {
|
|
||||||
r.URL.RawQuery = strings.Replace(r.URL.RawQuery, s, "[REDACTED]", -1)
|
|
||||||
}
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue